diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index eb74930dec50..bc150446d620 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -7,3 +7,4 @@
 # java-vertexai has maintainers
 /java-vertexai/ @googleapis/vertex-java-sdk @googleapis/cloud-java-team-teamsync
+/java-bigquerystorage/ @googleapis/yoshi-java @googleapis/api-bigquery @googleapis/cloud-java-team-teamsync
diff --git a/.github/workflows/java-bigquerystorage-ci.yaml b/.github/workflows/java-bigquerystorage-ci.yaml
new file mode 100644
index 000000000000..1bf64bb7b55f
--- /dev/null
+++ b/.github/workflows/java-bigquerystorage-ci.yaml
@@ -0,0 +1,125 @@
+on:
+  push:
+    paths-ignore:
+      - 'tutorials/**'
+    branches:
+      - main
+  pull_request:
+name: java-bigquerystorage ci
+env:
+  BUILD_SUBDIR: java-bigquerystorage
+jobs:
+  filter:
+    runs-on: ubuntu-latest
+    outputs:
+      library: ${{ steps.filter.outputs.library }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          filters: |
+            library:
+              - 'java-bigquerystorage/**'
+  units:
+    needs: filter
+    if: ${{ needs.filter.outputs.library == 'true' }}
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        java: [11, 17, 21, 25]
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/setup-java@v2
+        with:
+          distribution: temurin
+          java-version: ${{matrix.java}}
+      - run: java -version
+      - run: .kokoro/build.sh
+        env:
+          JOB_TYPE: test
+  units-java8:
+    needs: filter
+    if: ${{ needs.filter.outputs.library == 'true' }}
+    # Build with Java 17, then run the tests on a Java 8 runtime
+    name: "units (8)"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/setup-java@v3
+        with:
+          java-version: 8
+          distribution: zulu
+      - name: "Set jvm system property environment variable for surefire plugin (unit tests)"
+        # The Maven surefire plugin (unit tests) allows us to specify the JVM that runs the tests.
+        # https://maven.apache.org/surefire/maven-surefire-plugin/test-mojo.html#jvm
+        run: echo "SUREFIRE_JVM_OPT=-Djvm=${JAVA_HOME}/bin/java -P !java17" >> $GITHUB_ENV
+        shell: bash
+      - uses: actions/setup-java@v3
+        with:
+          java-version: 17
+          distribution: zulu
+      - run: .kokoro/build.sh
+        env:
+          JOB_TYPE: test
+  windows:
+    needs: filter
+    if: ${{ needs.filter.outputs.library == 'true' }}
+    runs-on: windows-latest
+    steps:
+      - name: Support longpaths
+        run: git config --system core.longpaths true
+        working-directory: .
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/setup-java@v2
+        with:
+          distribution: temurin
+          java-version: 8
+      - run: java -version
+      - run: .kokoro/build.sh
+        env:
+          JOB_TYPE: test
+  dependencies:
+    needs: filter
+    if: ${{ needs.filter.outputs.library == 'true' }}
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        java: [17]
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/setup-java@v2
+        with:
+          distribution: temurin
+          java-version: ${{matrix.java}}
+      - run: java -version
+      - run: .kokoro/dependencies.sh
+  javadoc:
+    needs: filter
+    if: ${{ needs.filter.outputs.library == 'true' }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/setup-java@v3
+        with:
+          distribution: temurin
+          java-version: 17
+      - run: java -version
+      - run: .kokoro/build.sh
+        env:
+          JOB_TYPE: javadoc
+  lint:
+    needs: filter
+    if: ${{ needs.filter.outputs.library == 'true' }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/setup-java@v2
+        with:
+          distribution: temurin
+          java-version: 17
+      - run: java -version
+      - run: .kokoro/build.sh
+        env:
+          JOB_TYPE: lint
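The workflow above only selects JDKs and exports BUILD_SUBDIR, JOB_TYPE, and (in the "units (8)" job) SUREFIRE_JVM_OPT; the .kokoro/build.sh change below is what actually consumes them. As a minimal sketch of what the "units (8)" job amounts to when reproduced by hand — the JDK install paths here are assumptions, not part of this change:

#!/bin/bash
# Sketch: approximate the "units (8)" job locally (hypothetical JDK paths).
export BUILD_SUBDIR=java-bigquerystorage   # build.sh pushd's into this directory
export JOB_TYPE=test                       # selects the `mvn test` branch of build.sh
# Maven itself runs on Java 17 (JAVA_HOME), while surefire forks the Java 8
# binary recorded here to execute the tests; "-P !java17" presumably
# deactivates a Java-17-only Maven profile.
export SUREFIRE_JVM_OPT="-Djvm=/usr/lib/jvm/java-8-openjdk-amd64/bin/java -P !java17"
JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64 .kokoro/build.sh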
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index e604c8768b86..fcc004083ca3 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -35,8 +35,15 @@ fi
 
 RETURN_CODE=0
 
+if [[ -n "${BUILD_SUBDIR}" ]]
+then
+  echo "Running in subdir: ${BUILD_SUBDIR}"
+  pushd "${BUILD_SUBDIR}"
+fi
+
 case ${JOB_TYPE} in
   test)
+    echo "SUREFIRE_JVM_OPT: ${SUREFIRE_JVM_OPT}"
     retry_with_backoff 3 10 \
       mvn test \
         -B -ntp \
@@ -48,7 +55,7 @@ case ${JOB_TYPE} in
         -Dflatten.skip=true \
         -Danimal.sniffer.skip=true \
         -Dmaven.wagon.http.retryHandler.count=5 \
-        -T 1C
+        -T 1C ${SUREFIRE_JVM_OPT}
     RETURN_CODE=$?
     echo "Finished running unit tests"
     ;;
@@ -125,6 +132,12 @@
 esac
 
+if [[ -n "${BUILD_SUBDIR}" ]]
+then
+  echo "Leaving subdir: ${BUILD_SUBDIR}"
+  popd
+fi
+
 if [ "${REPORT_COVERAGE}" == "true" ]; then
   bash ${KOKORO_GFILE_DIR}/codecov.sh
 fi
diff --git a/.kokoro/dependencies.sh b/.kokoro/dependencies.sh
index bd8960246f66..6b9dedd2c3c2 100755
--- a/.kokoro/dependencies.sh
+++ b/.kokoro/dependencies.sh
@@ -49,6 +49,12 @@ function determineMavenOpts() {
 
 export MAVEN_OPTS=$(determineMavenOpts)
 
+if [[ -n "${BUILD_SUBDIR}" ]]
+then
+  echo "Running in subdir: ${BUILD_SUBDIR}"
+  pushd "${BUILD_SUBDIR}"
+fi
+
 # this should run maven enforcer
 retry_with_backoff 3 10 \
   mvn install -B -V -ntp \
@@ -57,3 +63,9 @@ retry_with_backoff 3 10 \
     -Dclirr.skip=true
 
 mvn -B dependency:analyze -DfailOnWarning=true
+
+if [[ -n "${BUILD_SUBDIR}" ]]
+then
+  echo "Leaving subdir: ${BUILD_SUBDIR}"
+  popd
+fi
\ No newline at end of file
diff --git a/gapic-libraries-bom/pom.xml b/gapic-libraries-bom/pom.xml
index e7b3efd5a591..9befde80d91a 100644
--- a/gapic-libraries-bom/pom.xml
+++ b/gapic-libraries-bom/pom.xml
@@ -266,6 +266,13 @@
         <type>pom</type>
         <scope>import</scope>
       </dependency>
+      <dependency>
+        <groupId>com.google.cloud</groupId>
+        <artifactId>google-cloud-bigquerystorage-bom</artifactId>
+        <version>3.19.1</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
       <dependency>
         <groupId>com.google.cloud</groupId>
         <artifactId>google-cloud-billing-bom</artifactId>
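The BOM addition above pins google-cloud-bigquerystorage-bom 3.19.1 for consumers of the umbrella gapic-libraries-bom. The script change below then exempts samples poms from the version check; for everything else the rule stands: a version tag must carry release-please's x-version-update marker. A hedged illustration of what passes and what gets flagged (the pom fragments are hypothetical):

#!/bin/bash
# Managed version lines carry an x-version-update comment and are filtered out:
#   <version>3.19.1</version><!-- {x-version-update:google-cloud-bigquerystorage:current} -->
# A bare version line has no marker, so the pipeline below prints it as a violation:
#   <version>3.19.1</version>
grep -n '<version>.*</version>' pom.xml | grep -v 'x-version-update'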
diff --git a/generation/check_non_release_please_versions.sh b/generation/check_non_release_please_versions.sh
index bb7e2f0e88e0..56db7e7107df 100755
--- a/generation/check_non_release_please_versions.sh
+++ b/generation/check_non_release_please_versions.sh
@@ -20,6 +20,11 @@ for pomFile in $(find . -mindepth 2 -name pom.xml | sort ); do
     continue
   fi
 
+  if [[ "${pomFile}" =~ .*/samples/.* ]]; then
+    echo "Skipping version check for samples directory"
+    continue
+  fi
+
   if grep -n '<version>.*</version>' "$pomFile" | grep -v 'x-version-update'; then
     echo "Found version declaration(s) without x-version-update in: $pomFile"
     violations=$((violations + 1))
diff --git a/generation_config.yaml b/generation_config.yaml
index 76a9c95606a8..e8431486189d 100644
--- a/generation_config.yaml
+++ b/generation_config.yaml
@@ -1,10 +1,6 @@
 gapic_generator_version: 2.65.1
 googleapis_commitish: 415914bd49d41beaae8a9adb348ee2587c93aa70
 libraries_bom_version: 26.73.0
-
-# the libraries are ordered with respect to library name, which is
-# java-{library.library_name} or java-{library.api-shortname} when
-# library.library_name is not defined.
 libraries:
 - api_shortname: accessapproval
   name_pretty: Access Approval
@@ -14,7 +10,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/accessapproval/v1
-
 - api_shortname: accesscontextmanager
   name_pretty: Identity Access Context Manager
   product_documentation: n/a
@@ -24,7 +19,6 @@ libraries:
   GAPICs:
   - proto_path: google/identity/accesscontextmanager/v1
   - proto_path: google/identity/accesscontextmanager/type
-
 - api_shortname: admanager
   name_pretty: Google Ad Manager API
   product_documentation: https://developers.google.com/ad-manager/api/beta
@@ -40,7 +34,6 @@ libraries:
   GAPICs:
   - proto_path: google/ads/admanager/v1
   requires_billing: true
-
 - api_shortname: advisorynotifications
   name_pretty: Advisory Notifications API
   product_documentation: https://cloud.google.com/advisory-notifications/
@@ -48,7 +41,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/advisorynotifications/v1
-
 - api_shortname: aiplatform
   name_pretty: Vertex AI
   product_documentation: https://cloud.google.com/vertex-ai/docs
@@ -61,7 +53,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/aiplatform/v1
   - proto_path: google/cloud/aiplatform/v1beta1
-
 - api_shortname: alloydb
   name_pretty: AlloyDB
   product_documentation: https://cloud.google.com/alloydb/
@@ -73,7 +64,6 @@ libraries:
   - proto_path: google/cloud/alloydb/v1
   - proto_path: google/cloud/alloydb/v1alpha
   - proto_path: google/cloud/alloydb/v1beta
-
 - api_shortname: alloydb
   name_pretty: AlloyDB connectors
   product_documentation: https://cloud.google.com/alloydb/docs
@@ -88,7 +78,6 @@ libraries:
   - proto_path: google/cloud/alloydb/connectors/v1
   - proto_path: google/cloud/alloydb/connectors/v1alpha
   - proto_path: google/cloud/alloydb/connectors/v1beta
-
 - api_shortname: analyticsadmin
   name_pretty: Analytics Admin
   product_documentation: https://developers.google.com/analytics
@@ -100,7 +89,6 @@ libraries:
   GAPICs:
   - proto_path: google/analytics/admin/v1alpha
   - proto_path: google/analytics/admin/v1beta
-
 - api_shortname: analyticsdata
   name_pretty: Analytics Data
   product_documentation: https://developers.google.com/analytics/trusted-testing/analytics-data
@@ -114,7 +102,6 @@ libraries:
   GAPICs:
   - proto_path: google/analytics/data/v1alpha
   - proto_path: google/analytics/data/v1beta
-
 - api_shortname: analyticshub
   name_pretty: Analytics Hub API
   product_documentation: https://cloud.google.com/bigquery/TBD
@@ -122,7 +109,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/bigquery/analyticshub/v1
-
 - api_shortname: apigateway
   name_pretty: API Gateway
   product_documentation: https://cloud.google.com/api-gateway/docs
@@ -137,7 +123,6 @@ libraries:
   rest_documentation: https://cloud.google.com/api-gateway/docs/reference/rest
   GAPICs:
   - proto_path: google/cloud/apigateway/v1
-
 - api_shortname: apigeeconnect
   name_pretty: Apigee Connect
   product_documentation: https://cloud.google.com/apigee/docs/hybrid/v1.3/apigee-connect/
@@ -148,7 +133,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/apigeeconnect/v1
-
 - api_shortname: apigee-registry
   name_pretty: Registry API
   product_documentation: https://cloud.google.com/apigee/docs/api-hub/get-started-registry-api
@@ -157,7 +141,6 @@ libraries:
   api_id: apigeeregistry.googleapis.com
   GAPICs:
   - proto_path: google/cloud/apigeeregistry/v1
-
 - api_shortname: apihub
   name_pretty: API hub API
   product_documentation: https://cloud.google.com/apigee/docs/apihub/what-is-api-hub
@@ -183,7 +166,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/api/apikeys/v2
-
 - api_shortname: appengine
   name_pretty: App Engine Admin API
   product_documentation: https://cloud.google.com/appengine/docs/admin-api/
@@ -193,7 +175,6 @@ libraries:
   codeowner_team: '@googleapis/aap-dpes'
   GAPICs:
   - proto_path: google/appengine/v1
-
 - api_shortname: apphub
   name_pretty: App Hub API
   product_documentation: https://cloud.google.com/app-hub/docs/overview
@@ -202,7 +183,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/app-hub/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/apphub/v1
-
 - api_shortname: area120tables
   name_pretty: Area 120 Tables
   product_documentation: https://area120.google.com/
@@ -212,7 +192,6 @@ libraries:
   distribution_name: com.google.area120:google-area120-tables
   GAPICs:
   - proto_path: google/area120/tables/v1alpha1
-
 - api_shortname: artifactregistry
   name_pretty: Artifact Registry
   product_documentation: https://cloud.google.com/artifact-registry
@@ -229,7 +208,6 @@ libraries:
   GAPICs:
   - proto_path: google/devtools/artifactregistry/v1
   - proto_path: google/devtools/artifactregistry/v1beta2
-
 - api_shortname: cloudasset
   name_pretty: Cloud Asset Inventory
   product_documentation: https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/overview
@@ -247,7 +225,6 @@ libraries:
   - proto_path: google/cloud/asset/v1p2beta1
   - proto_path: google/cloud/asset/v1p5beta1
   - proto_path: google/cloud/asset/v1p7beta1
-
 - api_shortname: assuredworkloads
   name_pretty: Assured Workloads for Government
   product_documentation: https://cloud.google.com/assured-workloads/
@@ -260,7 +237,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/assuredworkloads/v1
   - proto_path: google/cloud/assuredworkloads/v1beta1
-
 - api_shortname: automl
   name_pretty: Cloud Auto ML
   product_documentation: https://cloud.google.com/automl/docs/
@@ -276,14 +252,12 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/automl/v1
   - proto_path: google/cloud/automl/v1beta1
-
 - api_shortname: backupdr
   name_pretty: Backup and DR Service API
   product_documentation: https://cloud.google.com/backup-disaster-recovery/docs/concepts/backup-dr
   api_description: 'Backup and DR Service is a powerful, centralized, cloud-first
     backup and disaster recovery solution for cloud-based and hybrid workloads.
     '
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-backupdr/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-backupdr/latest/overview
   release_level: stable
   distribution_name: com.google.cloud:google-cloud-backupdr
   api_id: backupdr.googleapis.com
@@ -303,7 +277,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/bare-metal/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/baremetalsolution/v2
-
 - api_shortname: batch
   name_pretty: Cloud Batch
   product_documentation: https://cloud.google.com/
@@ -311,7 +284,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/batch/v1
   - proto_path: google/cloud/batch/v1alpha
-
 - api_shortname: beyondcorp-appconnections
   name_pretty: BeyondCorp AppConnections
   product_documentation: https://cloud.google.com/beyondcorp-enterprise/
@@ -322,7 +294,6 @@ libraries:
   need for a traditional VPN.
   GAPICs:
   - proto_path: google/cloud/beyondcorp/appconnections/v1
-
 - api_shortname: beyondcorp-appconnectors
   name_pretty: BeyondCorp AppConnectors
   product_documentation: cloud.google.com/beyondcorp-enterprise/
   api_description: provides a programmatic interface for managing BeyondCorp
     AppConnectors.
   GAPICs:
   - proto_path: google/cloud/beyondcorp/appconnectors/v1
-
 - api_shortname: beyondcorp-appgateways
   name_pretty: BeyondCorp AppGateways
   product_documentation: https://cloud.google.com/beyondcorp-enterprise/
@@ -339,7 +309,6 @@ libraries:
   api_id: beyondcorp.googleapis.com
   GAPICs:
   - proto_path: google/cloud/beyondcorp/appgateways/v1
-
 - api_shortname: beyondcorp-clientconnectorservices
   name_pretty: BeyondCorp ClientConnectorServices
   product_documentation: https://cloud.google.com/beyondcorp-enterprise/
@@ -348,7 +317,6 @@ libraries:
   api_id: beyondcorp.googleapis.com
   GAPICs:
   - proto_path: google/cloud/beyondcorp/clientconnectorservices/v1
-
 - api_shortname: beyondcorp-clientgateways
   name_pretty: BeyondCorp ClientGateways
   product_documentation: https://cloud.google.com/beyondcorp-enterprise/
@@ -357,7 +325,6 @@ libraries:
   api_id: beyondcorp.googleapis.com
   GAPICs:
   - proto_path: google/cloud/beyondcorp/clientgateways/v1
-
 - api_shortname: biglake
   name_pretty: BigLake
   product_documentation: https://cloud.google.com/biglake
@@ -368,7 +335,6 @@ libraries:
   - proto_path: google/cloud/bigquery/biglake/v1
   - proto_path: google/cloud/biglake/v1
   - proto_path: google/cloud/bigquery/biglake/v1alpha1
-
 - api_shortname: analyticshub
   name_pretty: Analytics Hub
   product_documentation: https://cloud.google.com/analytics-hub
@@ -378,18 +344,15 @@ libraries:
   library_name: bigquery-data-exchange
   GAPICs:
   - proto_path: google/cloud/bigquery/dataexchange/v1beta1
-
 - api_shortname: bigqueryconnection
   name_pretty: Cloud BigQuery Connection
   product_documentation: https://cloud.google.com/bigquery/docs/reference/bigqueryconnection/rest
   api_description: allows users to manage BigQuery connections to external data
     sources.
   release_level: stable
-  client_documentation:
-    https://cloud.google.com/bigquery/docs/reference/reservations/rpc/google.cloud.bigquery.reservation.v1beta1
+  client_documentation: https://cloud.google.com/bigquery/docs/reference/reservations/rpc/google.cloud.bigquery.reservation.v1beta1
   GAPICs:
   - proto_path: google/cloud/bigquery/connection/v1
   - proto_path: google/cloud/bigquery/connection/v1beta1
-
 - api_shortname: bigquerydatapolicy
   name_pretty: BigQuery DataPolicy API
   product_documentation: https://cloud.google.com/bigquery/docs/reference/datapolicy/
@@ -399,7 +362,6 @@ libraries:
   - proto_path: google/cloud/bigquery/datapolicies/v1beta1
   - proto_path: google/cloud/bigquery/datapolicies/v2beta1
   - proto_path: google/cloud/bigquery/datapolicies/v2
-
 - api_shortname: bigquerydatatransfer
   name_pretty: BigQuery Data Transfer Service
   product_documentation: https://cloud.google.com/bigquery/transfer/
@@ -409,7 +371,6 @@ libraries:
   issue_tracker: https://issuetracker.google.com/savedsearches/559654
   GAPICs:
   - proto_path: google/cloud/bigquery/datatransfer/v1
-
 - api_shortname: bigquerymigration
   name_pretty: BigQuery Migration
   product_documentation: https://cloud.google.com/bigquery/docs
@@ -418,7 +379,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/bigquery/migration/v2
   - proto_path: google/cloud/bigquery/migration/v2alpha
-
 - api_shortname: bigqueryreservation
   name_pretty: Cloud BigQuery Reservation
   product_documentation: https://cloud.google.com/bigquery/docs/reference/reservations/rpc
@@ -426,7 +386,30 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/bigquery/reservation/v1
-
+- api_shortname: bigquerystorage
+  name_pretty: BigQuery Storage
+  product_documentation: https://cloud.google.com/bigquery/docs/reference/storage/
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/history
+  api_description: is an API for reading data stored in BigQuery. This API provides
+    direct, high-throughput read access to existing BigQuery tables, supports parallel
+    access with automatic liquid sharding, and allows fine-grained control over what
+    data is returned.
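+  # Presumably (by analogy with other entries in this file): library_type
+  # GAPIC_COMBO below marks a library that pairs generated GAPIC clients with a
+  # handwritten layer, and recommended_package points documentation at the
+  # GA v1 surface rather than the alpha/beta proto_paths.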
+  issue_tracker: https://issuetracker.google.com/savedsearches/559654
+  release_level: stable
+  language: java
+  distribution_name: com.google.cloud:google-cloud-bigquerystorage
+  codeowner_team: '@googleapis/api-bigquery'
+  api_id: bigquerystorage.googleapis.com
+  transport: grpc
+  requires_billing: true
+  library_type: GAPIC_COMBO
+  recommended_package: com.google.cloud.bigquery.storage.v1
+  GAPICs:
+  - proto_path: google/cloud/bigquery/storage/v1
+  - proto_path: google/cloud/bigquery/storage/v1alpha
+  - proto_path: google/cloud/bigquery/storage/v1beta1
+  - proto_path: google/cloud/bigquery/storage/v1beta2
+  - proto_path: google/cloud/bigquery/storage/v1beta
 - api_shortname: cloudbilling
   name_pretty: Cloud Billing
   product_documentation: https://cloud.google.com/billing/docs
@@ -439,7 +422,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/billing/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/billing/v1
-
 - api_shortname: billingbudgets
   name_pretty: Cloud Billing Budgets
   product_documentation: https://cloud.google.com/billing/docs/how-to/budgets
@@ -449,7 +431,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/billing/budgets/v1
   - proto_path: google/cloud/billing/budgets/v1beta1
-
 - api_shortname: binaryauthorization
   name_pretty: Binary Authorization
   product_documentation: https://cloud.google.com/binary-authorization/docs
@@ -465,13 +446,11 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/binaryauthorization/v1
   - proto_path: google/cloud/binaryauthorization/v1beta1
-
 - api_shortname: capacityplanner
   name_pretty: Capacity Planner API
   product_documentation: https://cloud.google.com/capacity-planner/docs
   api_description: Provides programmatic access to Capacity Planner features.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-capacityplanner/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-capacityplanner/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-capacityplanner
   api_id: capacityplanner.googleapis.com
@@ -490,7 +469,6 @@ libraries:
   api_id: certificatemanager.googleapis.com
   GAPICs:
   - proto_path: google/cloud/certificatemanager/v1
-
 - api_shortname: cloudchannel
   name_pretty: Channel Services
   product_documentation: https://cloud.google.com/channel/docs
@@ -504,7 +482,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/channel/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/channel/v1
-
 - api_shortname: chat
   name_pretty: Google Chat API
   product_documentation: https://developers.google.com/chat/concepts
@@ -514,15 +491,13 @@ libraries:
   rest_documentation: https://developers.google.com/chat/api/reference/rest
   GAPICs:
   - proto_path: google/chat/v1
-
 - api_shortname: chronicle
   name_pretty: Chronicle API
   product_documentation: https://cloud.google.com/chronicle/docs/secops/secops-overview
   api_description: The Google Cloud Security Operations API, popularly known as the
     Chronicle API, serves endpoints that enable security analysts to analyze and
     mitigate a security threat throughout its lifecycle
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-chronicle/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-chronicle/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-chronicle
   api_id: chronicle.googleapis.com
@@ -538,8 +513,7 @@ libraries:
   api_description: Cloud API Registry lets you discover, govern, use, and monitor Model
     Context Protocol (MCP) servers and tools provided by Google, or by your organization
     through Apigee API hub.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-cloudapiregistry/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-cloudapiregistry/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-cloudapiregistry
   api_id: cloudapiregistry.googleapis.com
@@ -562,7 +536,6 @@ libraries:
   GAPICs:
   - proto_path: google/devtools/cloudbuild/v1
   - proto_path: google/devtools/cloudbuild/v2
-
 - api_shortname: cloudcommerceconsumerprocurement
   name_pretty: Cloud Commerce Consumer Procurement
   product_documentation: https://cloud.google.com/marketplace/
@@ -573,7 +546,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/commerce/consumer/procurement/v1
   - proto_path: google/cloud/commerce/consumer/procurement/v1alpha1
-
 - api_shortname: cloudcontrolspartner
   name_pretty: Cloud Controls Partner API
   product_documentation: https://cloud.google.com/sovereign-controls-by-partners/docs/sovereign-partners
@@ -583,27 +555,23 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/cloudcontrolspartner/v1
   - proto_path: google/cloud/cloudcontrolspartner/v1beta
-
 - api_shortname: cloudquotas
   name_pretty: Cloud Quotas API
   product_documentation: https://cloud.google.com/cloudquotas/docs/
-  api_description: "Cloud Quotas API provides GCP service consumers with management
-    and\n observability for resource usage, quotas, and restrictions of the services\n\
+  api_description: "Cloud Quotas API provides GCP service consumers with management\
+    \ and\n observability for resource usage, quotas, and restrictions of the services\n\
     \ they consume."
   release_level: stable
   GAPICs:
   - proto_path: google/api/cloudquotas/v1
   - proto_path: google/api/cloudquotas/v1beta
-
 - api_shortname: cloudsecuritycompliance
   name_pretty: Cloud Security Compliance API
-  product_documentation:
-    https://cloud.google.com/security-command-center/docs/compliance-manager-overview
+  product_documentation: https://cloud.google.com/security-command-center/docs/compliance-manager-overview
   api_description: Compliance Manager uses software-defined controls that let you
     assess support for multiple compliance programs and security requirements within
     a Google Cloud organization
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-cloudsecuritycompliance/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-cloudsecuritycompliance/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-cloudsecuritycompliance
   api_id: cloudsecuritycompliance.googleapis.com
@@ -621,21 +589,19 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/support/v2
   - proto_path: google/cloud/support/v2beta
-
 - api_shortname: compute
   name_pretty: Compute Engine
   product_documentation: https://cloud.google.com/compute/
-  api_description: "delivers virtual machines running in Google's innovative data
-    centers and worldwide fiber network. Compute Engine's tooling and workflow support
+  api_description: 'delivers virtual machines running in Google''s innovative data
+    centers and worldwide fiber network. Compute Engine''s tooling and workflow support
     enable scaling from single instances to global, load-balanced cloud computing.
-    Compute Engine's VMs boot quickly, come with persistent disk storage, deliver
-    consistent performance and are available in many configurations. "
+    Compute Engine''s VMs boot quickly, come with persistent disk storage, deliver
+    consistent performance and are available in many configurations. '
   release_level: stable
   excluded_poms: grpc-google-cloud-compute-v1
   excluded_dependencies: grpc-google-cloud-compute-v1
   GAPICs:
   - proto_path: google/cloud/compute/v1
-
 - api_shortname: confidentialcomputing
   name_pretty: Confidential Computing API
   product_documentation: https://cloud.google.com/confidential-computing/
@@ -644,15 +610,12 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/confidentialcomputing/v1
   - proto_path: google/cloud/confidentialcomputing/v1alpha1
-
 - api_shortname: configdelivery
   name_pretty: Config Delivery API
-  product_documentation:
-    https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/concepts/fleet-packages
+  product_documentation: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/concepts/fleet-packages
   api_description: ConfigDelivery service manages the deployment of kubernetes configuration
     to a fleet of kubernetes clusters.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-configdelivery/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-configdelivery/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-configdelivery
   api_id: configdelivery.googleapis.com
@@ -660,19 +623,16 @@ libraries:
   group_id: com.google.cloud
   cloud_api: true
   requires_billing: true
-  rest_documentation:
-    https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/reference/rest
+  rest_documentation: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/reference/rest
   GAPICs:
   - proto_path: google/cloud/configdelivery/v1beta
   - proto_path: google/cloud/configdelivery/v1
 - api_shortname: connectgateway
   name_pretty: Connect Gateway API
-  product_documentation:
-    https://cloud.google.com/kubernetes-engine/enterprise/multicluster-management/gateway
+  product_documentation: https://cloud.google.com/kubernetes-engine/enterprise/multicluster-management/gateway
   api_description: The Connect Gateway service allows connectivity from external
     parties to connected Kubernetes clusters.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-connectgateway/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-connectgateway/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-connectgateway
   api_id: connectgateway.googleapis.com
@@ -692,7 +652,6 @@ libraries:
   codeowner_team: '@googleapis/api-contact-center-insights'
   GAPICs:
   - proto_path: google/cloud/contactcenterinsights/v1
-
 - api_shortname: container
   name_pretty: Kubernetes Engine
   product_documentation: https://cloud.google.com/kubernetes-engine/
@@ -708,7 +667,6 @@ libraries:
   GAPICs:
   - proto_path: google/container/v1
   - proto_path: google/container/v1beta1
-
 - api_shortname: containeranalysis
   name_pretty: Cloud Container Analysis
   product_documentation: https://cloud.google.com/container-registry/docs/container-analysis
@@ -724,7 +682,6 @@ libraries:
   GAPICs:
   - proto_path: google/devtools/containeranalysis/v1
   - proto_path: google/devtools/containeranalysis/v1beta1
-
 - api_shortname: contentwarehouse
   name_pretty: Document AI Warehouse
   product_documentation: https://cloud.google.com/document-warehouse/docs/overview
@@ -733,7 +690,6 @@ libraries:
   metadata.
   GAPICs:
   - proto_path: google/cloud/contentwarehouse/v1
-
 - api_shortname: datafusion
   name_pretty: Cloud Data Fusion
   product_documentation: https://cloud.google.com/data-fusion/docs
@@ -745,19 +701,17 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/datafusion/v1
   - proto_path: google/cloud/datafusion/v1beta1
-
 - api_shortname: databasecenter
   name_pretty: Database Center API
   product_documentation: https://cloud.google.com/database-center/docs/overview
-  api_description: Database Center provides an organization-wide, cross-product fleet
-    health platform to eliminate the overhead, complexity, and risk associated with
-    aggregating and summarizing health signals through custom dashboards. Through
-    Database Center’s fleet health dashboard and API, database platform teams that
-    are responsible for reliability, compliance, security, cost, and administration
-    of database fleets will now have a single pane of glass that pinpoints issues
-    relevant to each team.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-databasecenter/latest/overview
+  api_description: "Database Center provides an organization-wide, cross-product fleet\
+    \ health platform to eliminate the overhead, complexity, and risk associated with\
+    \ aggregating and summarizing health signals through custom dashboards. Through\
+    \ Database Center\u2019s fleet health dashboard and API, database platform teams\
+    \ that are responsible for reliability, compliance, security, cost, and administration\
+    \ of database fleets will now have a single pane of glass that pinpoints issues\
+    \ relevant to each team."
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-databasecenter/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-databasecenter
   api_id: databasecenter.googleapis.com
@@ -777,7 +731,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/datacatalog/v1
   - proto_path: google/cloud/datacatalog/v1beta1
-
 - api_shortname: dataflow
   name_pretty: Dataflow
   product_documentation: https://cloud.google.com/dataflow/docs
@@ -787,7 +740,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/dataflow/docs/reference/rpc
   GAPICs:
   - proto_path: google/dataflow/v1beta3
-
 - api_shortname: dataform
   name_pretty: Cloud Dataform
   product_documentation: https://cloud.google.com/dataform/docs
@@ -795,7 +747,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/dataform/v1beta1
   - proto_path: google/cloud/dataform/v1
-
 - api_shortname: datalabeling
   name_pretty: Data Labeling
   product_documentation: https://cloud.google.com/ai-platform/data-labeling/docs/
@@ -807,7 +758,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/ai-platform/data-labeling/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/datalabeling/v1beta1
-
 - api_shortname: datalineage
   name_pretty: Data Lineage
   product_documentation: https://cloud.google.com/data-catalog/docs/data-lineage/
@@ -815,7 +765,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/datacatalog/lineage/v1
-
 - api_shortname: datamanager
   name_pretty: Data Manager API
   product_documentation: https://developers.google.com/data-manager
@@ -845,7 +794,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/dataplex/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/dataplex/v1
-
 - api_shortname: dataproc
   name_pretty: Dataproc
   product_documentation: https://cloud.google.com/dataproc
@@ -858,7 +806,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/dataproc/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/dataproc/v1
-
 - api_shortname: metastore
   name_pretty: Dataproc Metastore
   product_documentation: https://cloud.google.com/dataproc-metastore/docs
@@ -874,7 +821,6 @@ libraries:
   - proto_path: google/cloud/metastore/v1
   - proto_path: google/cloud/metastore/v1alpha
   - proto_path: google/cloud/metastore/v1beta
-
 - api_shortname: datastream
   name_pretty: Datastream
   product_documentation: https://cloud.google.com/datastream/docs
@@ -886,7 +832,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/datastream/v1
   - proto_path: google/cloud/datastream/v1alpha1
-
 - api_shortname: clouddeploy
   name_pretty: Google Cloud Deploy
   product_documentation: https://cloud.google.com/deploy/docs
@@ -897,13 +842,11 @@ libraries:
   codeowner_team: '@googleapis/aap-dpes'
   GAPICs:
   - proto_path: google/cloud/deploy/v1
-
 - api_shortname: developerconnect
   name_pretty: Developer Connect API
   product_documentation: https://cloud.google.com/developer-connect/docs/overview
   api_description: Connect third-party source code management to Google
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-developerconnect/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-developerconnect/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-developerconnect
   api_id: developerconnect.googleapis.com
@@ -917,8 +860,7 @@ libraries:
   name_pretty: Device Streaming API
   product_documentation: https://cloud.google.com/device-streaming/docs
   api_description: The Cloud API for device streaming usage.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-devicestreaming/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-devicestreaming/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-devicestreaming
   api_id: devicestreaming.googleapis.com
@@ -943,7 +885,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/dialogflow/v2
   - proto_path: google/cloud/dialogflow/v2beta1
-
 - api_shortname: dialogflow-cx
   name_pretty: Dialogflow CX
   product_documentation: https://cloud.google.com/dialogflow/cx/docs
@@ -955,7 +896,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/dialogflow/cx/v3
   - proto_path: google/cloud/dialogflow/cx/v3beta1
-
 - api_shortname: discoveryengine
   name_pretty: Discovery Engine API
   product_documentation: https://cloud.google.com/discovery-engine/media/docs
@@ -966,7 +906,6 @@ libraries:
   - proto_path: google/cloud/discoveryengine/v1
   - proto_path: google/cloud/discoveryengine/v1alpha
   - proto_path: google/cloud/discoveryengine/v1beta
-
 - api_shortname: distributedcloudedge
   name_pretty: Google Distributed Cloud Edge
   product_documentation: https://cloud.google.com/distributed-cloud/edge/latest/
@@ -978,7 +917,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/edgecontainer/v1
-
 - api_shortname: dlp
   name_pretty: Cloud Data Loss Prevention
   product_documentation: https://cloud.google.com/dlp/docs/
@@ -991,7 +929,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/dlp/docs/reference/rpc
   GAPICs:
   - proto_path: google/privacy/dlp/v2
-
 - api_shortname: datamigration
   name_pretty: Database Migration Service
   product_documentation: https://cloud.google.com/database-migration/docs
@@ -1004,7 +941,6 @@ libraries:
   rest_documentation: https://cloud.google.com/database-migration/docs/reference/rest
   GAPICs:
   - proto_path: google/cloud/clouddms/v1
-
 - api_shortname: documentai
   name_pretty: Document AI
   product_documentation: https://cloud.google.com/compute/docs/documentai/
@@ -1016,7 +952,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/documentai/v1
   - proto_path: google/cloud/documentai/v1beta3
-
 - api_shortname: domains
   name_pretty: Cloud Domains
   product_documentation: https://cloud.google.com/domains
@@ -1026,7 +961,6 @@ libraries:
   - proto_path: google/cloud/domains/v1
   - proto_path: google/cloud/domains/v1alpha2
   - proto_path: google/cloud/domains/v1beta1
-
 - api_shortname: edgenetwork
   name_pretty: Distributed Cloud Edge Network API
   product_documentation: https://cloud.google.com/distributed-cloud/edge/latest/docs/overview
@@ -1034,7 +968,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/edgenetwork/v1
-
 - api_shortname: enterpriseknowledgegraph
   name_pretty: Enterprise Knowledge Graph
   product_documentation: https://cloud.google.com/enterprise-knowledge-graph/docs/overview
@@ -1043,7 +976,6 @@ libraries:
   an efficient and useful way.
   GAPICs:
   - proto_path: google/cloud/enterpriseknowledgegraph/v1
-
 - api_shortname: clouderrorreporting
   name_pretty: Error Reporting
   product_documentation: https://cloud.google.com/error-reporting
@@ -1058,7 +990,6 @@ libraries:
   requires_billing: false
   GAPICs:
   - proto_path: google/devtools/clouderrorreporting/v1beta1
-
 - api_shortname: essentialcontacts
   name_pretty: Essential Contacts API
   product_documentation: https://cloud.google.com/resource-manager/docs/managing-notification-contacts/
@@ -1068,7 +999,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/essentialcontacts/v1
-
 - api_shortname: eventarc
   name_pretty: Eventarc
   product_documentation: https://cloud.google.com/eventarc/docs
@@ -1082,7 +1012,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/eventarc/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/eventarc/v1
-
 - api_shortname: eventarcpublishing
   name_pretty: Eventarc Publishing
   product_documentation: https://cloud.google.com/eventarc/docs
@@ -1094,7 +1023,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/eventarc/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/eventarc/publishing/v1
-
 - api_shortname: file
   name_pretty: Cloud Filestore API
   product_documentation: https://cloud.google.com/filestore/docs
@@ -1107,16 +1035,13 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/filestore/v1
   - proto_path: google/cloud/filestore/v1beta1
-
 - api_shortname: financialservices
   name_pretty: Financial Services API
-  product_documentation:
-    https://cloud.google.com/financial-services/anti-money-laundering/docs/concepts/overview
+  product_documentation: https://cloud.google.com/financial-services/anti-money-laundering/docs/concepts/overview
   api_description: Google Cloud's Anti Money Laundering AI (AML AI) product is an
     API that scores AML risk. Use it to identify more risk, more defensibly, with
     fewer false positives and reduced time per review.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-financialservices/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-financialservices/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-financialservices
   api_id: financialservices.googleapis.com
@@ -1141,15 +1066,13 @@ libraries:
   - proto_path: google/cloud/functions/v2
   - proto_path: google/cloud/functions/v2alpha
   - proto_path: google/cloud/functions/v2beta
-
 - api_shortname: gdchardwaremanagement
   name_pretty: GDC Hardware Management API
   product_documentation: https://cloud.google.com/distributed-cloud/edge/latest/docs
   api_description: Google Distributed Cloud connected allows you to run Kubernetes
     clusters on dedicated hardware provided and maintained by Google that is separate
     from the Google Cloud data center.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-gdchardwaremanagement/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-gdchardwaremanagement/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-gdchardwaremanagement
   api_id: gdchardwaremanagement.googleapis.com
@@ -1159,16 +1082,14 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/gdchardwaremanagement/v1alpha
   requires_billing: true
-  rpc_documentation:
-    https://cloud.google.com/distributed-cloud/edge/latest/docs/reference/hardware/rpc
+  rpc_documentation: https://cloud.google.com/distributed-cloud/edge/latest/docs/reference/hardware/rpc
 - api_shortname: geminidataanalytics
   name_pretty: Data Analytics API with Gemini
   product_documentation: https://cloud.google.com/gemini/docs/conversational-analytics-api/overview
   api_description: Use Conversational Analytics API to build an artificial intelligence
     (AI)-powered chat interface, or data agent, that answers questions about structured
     data using natural language.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-geminidataanalytics/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-geminidataanalytics/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-geminidataanalytics
   api_id: geminidataanalytics.googleapis.com
@@ -1187,7 +1108,6 @@ libraries:
   library_name: gke-backup
   GAPICs:
   - proto_path: google/cloud/gkebackup/v1
-
 - api_shortname: connectgateway
   name_pretty: Connect Gateway API
   product_documentation: https://cloud.google.com/anthos/multicluster-management/gateway/
@@ -1199,7 +1119,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/gkeconnect/gateway/v1beta1
-
 - api_shortname: gke-multi-cloud
   name_pretty: Anthos Multicloud
   product_documentation: https://cloud.google.com/anthos/clusters/docs/multi-cloud
@@ -1208,7 +1127,6 @@ libraries:
   api_id: gkemulticloud.googleapis.com
   GAPICs:
   - proto_path: google/cloud/gkemulticloud/v1
-
 - api_shortname: gkehub
   name_pretty: GKE Hub API
   product_documentation: https://cloud.google.com/anthos/gke/docs/
@@ -1225,16 +1143,13 @@ libraries:
   - proto_path: google/cloud/gkehub/v1beta1
   - proto_path: google/cloud/gkehub/policycontroller/v1beta
   - proto_path: google/cloud/gkehub/servicemesh/v1beta
-
 - api_shortname: gkerecommender
   name_pretty: GKE Recommender API
-  product_documentation:
-    https://cloud.google.com/kubernetes-engine/docs/how-to/machine-learning/inference-quickstart
+  product_documentation: https://cloud.google.com/kubernetes-engine/docs/how-to/machine-learning/inference-quickstart
   api_description: lets you analyze the performance and cost-efficiency of your inference
     workloads, and make data-driven decisions about resource allocation and model
     deployment strategies.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-gkerecommender/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-gkerecommender/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-gkerecommender
   api_id: gkerecommender.googleapis.com
@@ -1255,7 +1170,6 @@ libraries:
   library_name: grafeas
   GAPICs:
   - proto_path: grafeas/v1
-
 - api_shortname: gsuiteaddons
   name_pretty: Google Workspace Add-ons API
   product_documentation: https://developers.google.com/workspace/add-ons/overview
@@ -1271,14 +1185,11 @@ libraries:
   - proto_path: google/apps/script/type/gmail
   - proto_path: google/apps/script/type/sheets
   - proto_path: google/apps/script/type/slides
-
 - api_shortname: hypercomputecluster
   name_pretty: Cluster Director API
-  product_documentation:
-    https://cloud.google.com/blog/products/compute/managed-slurm-and-other-cluster-director-enhancements
+  product_documentation: https://cloud.google.com/blog/products/compute/managed-slurm-and-other-cluster-director-enhancements
   api_description: simplifies cluster management across compute, network, and storage
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-hypercomputecluster/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-hypercomputecluster/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-hypercomputecluster
   api_id: hypercomputecluster.googleapis.com
@@ -1304,7 +1215,6 @@ libraries:
   - proto_path: google/iam/v2beta
   - proto_path: google/iam/v3
   - proto_path: google/iam/v3beta
-
 - api_shortname: iam-admin
   name_pretty: IAM Admin API
   product_documentation: https://cloud.google.com/iam/docs/apis
@@ -1314,7 +1224,6 @@ libraries:
   api_id: iam.googleapis.com
   GAPICs:
   - proto_path: google/iam/admin/v1
-
 - api_shortname: iamcredentials
   name_pretty: IAM Service Account Credentials API
   product_documentation: https://cloud.google.com/iam/credentials/reference/rest/
@@ -1325,7 +1234,6 @@ libraries:
   issue_tracker: https://issuetracker.google.com/issues/new?component=187161&template=0
   GAPICs:
   - proto_path: google/iam/credentials/v1
-
 - api_shortname: iap
   name_pretty: Cloud Identity-Aware Proxy API
   product_documentation: https://cloud.google.com/iap
@@ -1348,7 +1256,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/ids/v1
-
 - api_shortname: infra-manager
   name_pretty: Infrastructure Manager API
   product_documentation: https://cloud.google.com/infrastructure-manager/docs/overview
@@ -1357,7 +1264,6 @@ libraries:
   release_level: stable
   GAPICs:
   - proto_path: google/cloud/config/v1
-
 - api_shortname: cloudiot
   name_pretty: Cloud Internet of Things (IoT) Core
   product_documentation: https://cloud.google.com/iot
@@ -1374,8 +1280,7 @@ libraries:
   name_pretty: Merchant Issue Resolution API
   product_documentation: https://developers.google.com/merchant/api
   api_description: Programatically manage your Merchant Issues
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-shopping-merchant-issue-resolution/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-issue-resolution/latest/overview
   release_level: stable
   distribution_name: com.google.shopping:google-shopping-merchant-issue-resolution
   api_id: merchantapi.googleapis.com
@@ -1387,13 +1292,11 @@ libraries:
   - proto_path: google/shopping/merchant/issueresolution/v1beta
   library_name: java-shopping-merchant-issue-resolution
   requires_billing: true
-
 - api_shortname: merchantapi
   name_pretty: Merchant Order Tracking API
   product_documentation: https://developers.google.com/merchant/api
   api_description: Programmatically manage your Merchant Center Accounts
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-shopping-merchant-order-tracking/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-order-tracking/latest/overview
   release_level: stable
   distribution_name: com.google.shopping:google-shopping-merchant-order-tracking
   api_id: merchantapi.googleapis.com
@@ -1405,7 +1308,6 @@ libraries:
   - proto_path: google/shopping/merchant/ordertracking/v1beta
   library_name: java-shopping-merchant-order-tracking
   requires_billing: true
-
 - api_shortname: cloudkms
   name_pretty: Cloud Key Management Service
   product_documentation: https://cloud.google.com/kms
@@ -1421,7 +1323,6 @@ libraries:
   issue_tracker: https://issuetracker.google.com/savedsearches/5264932
   GAPICs:
   - proto_path: google/cloud/kms/v1
-
 - api_shortname: kmsinventory
   name_pretty: KMS Inventory API
   product_documentation: https://cloud.google.com/kms/docs/
@@ -1430,7 +1331,6 @@ libraries:
   rpc_documentation: https://cloud.google.com/kms/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/kms/inventory/v1
-
 - api_shortname: language
   name_pretty: Natural Language
   product_documentation: https://cloud.google.com/natural-language/docs/
@@ -1446,14 +1346,12 @@ libraries:
   - proto_path: google/cloud/language/v1
   - proto_path: google/cloud/language/v1beta2
   - proto_path: google/cloud/language/v2
-
 - api_shortname: licensemanager
   name_pretty: License Manager API
   product_documentation: https://cloud.google.com/compute/docs/instances/windows/ms-licensing
   api_description: License Manager is a tool to manage and track third-party licenses
     on Google Cloud.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-licensemanager/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-licensemanager/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-licensemanager
   api_id: licensemanager.googleapis.com
@@ -1473,15 +1371,13 @@ libraries:
   rpc_documentation: https://cloud.google.com/life-sciences/docs/reference/rpc
   GAPICs:
   - proto_path: google/cloud/lifesciences/v2beta
-
 - api_shortname: locationfinder
   name_pretty: Cloud Location Finder API
   product_documentation: https://cloud.google.com/location-finder/docs/overview
   api_description: Cloud Location Finder is a public API that offers a repository
     of all Google Cloud and Google Distributed Cloud locations, as well as cloud
     locations for other cloud providers.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-locationfinder/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-locationfinder/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-locationfinder
   api_id: locationfinder.googleapis.com
@@ -1512,8 +1408,7 @@ libraries:
   product_documentation: https://cloud.google.com/unified-maintenance/docs/overview
   api_description: The Maintenance API provides a centralized view of planned disruptive
     maintenance events across supported Google Cloud products.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-maintenance/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-maintenance/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-maintenance
   api_id: maintenance.googleapis.com
@@ -1538,13 +1433,11 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/managedidentities/v1
   - proto_path: google/cloud/managedidentities/v1beta1
-
 - api_shortname: managedkafka
   name_pretty: Managed Service for Apache Kafka
   product_documentation: https://cloud.google.com/managed-kafka
   api_description: Manage Apache Kafka clusters and resources.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-managedkafka/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-managedkafka/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-managedkafka
   api_id: managedkafka.googleapis.com
@@ -1566,13 +1459,11 @@ libraries:
   distribution_name: com.google.maps:google-maps-addressvalidation
   GAPICs:
   - proto_path: google/maps/addressvalidation/v1
-
 - api_shortname: maps-area-insights
   name_pretty: Places Insights API
   product_documentation: https://developers.google.com/maps/documentation/places-insights
   api_description: Places Insights API.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-maps-area-insights/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-maps-area-insights/latest/overview
   release_level: preview
   distribution_name: com.google.maps:google-maps-area-insights
   api_id: maps-area-insights.googleapis.com
@@ -1584,14 +1475,12 @@ libraries:
   requires_billing: true
 - api_shortname: maps-fleetengine
   name_pretty: Local Rides and Deliveries API
-  product_documentation:
-    https://developers.google.com/maps/documentation/transportation-logistics/mobility
+  product_documentation: https://developers.google.com/maps/documentation/transportation-logistics/mobility
   api_description: Enables Fleet Engine for access to the On Demand Rides and Deliveries
     and Last Mile Fleet Solution APIs. Customer's use of Google Maps Content in the
     Cloud Logging Services is subject to the Google Maps Platform Terms of Service
     located at https://cloud.google.com/maps-platform/terms.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-maps-fleetengine/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-maps-fleetengine/latest/overview
   release_level: preview
   distribution_name: com.google.maps:google-maps-fleetengine
   api_id: maps-fleetengine.googleapis.com
@@ -1603,14 +1492,12 @@ libraries:
   requires_billing: true
 - api_shortname: maps-fleetengine-delivery
   name_pretty: Last Mile Fleet Solution Delivery API
-  product_documentation:
-    https://developers.google.com/maps/documentation/transportation-logistics/mobility
+  product_documentation: https://developers.google.com/maps/documentation/transportation-logistics/mobility
   api_description: Enables Fleet Engine for access to the On Demand Rides and Deliveries
     and Last Mile Fleet Solution APIs. Customer's use of Google Maps Content in the
     Cloud Logging Services is subject to the Google Maps Platform Terms of Service
     located at https://cloud.google.com/maps-platform/terms.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-maps-fleetengine-delivery/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-maps-fleetengine-delivery/latest/overview
   release_level: preview
   distribution_name: com.google.maps:google-maps-fleetengine-delivery
   api_id: maps-fleetengine-delivery.googleapis.com
@@ -1623,15 +1510,14 @@ libraries:
 - api_shortname: maps-mapsplatformdatasets
   name_pretty: Maps Platform Datasets API
   product_documentation: https://developers.google.com/maps/documentation
-  api_description: "The Maps Platform Datasets API enables developers to ingest geospatially-tied
-    datasets\n that they can use to enrich their experience of Maps Platform solutions
-    (e.g. styling, routing)."
+  api_description: "The Maps Platform Datasets API enables developers to ingest geospatially-tied\
+    \ datasets\n that they can use to enrich their experience of Maps Platform\
+    \ solutions (e.g. styling, routing)."
   api_id: mapsplatformdatasets.googleapis.com
   distribution_name: com.google.maps:google-maps-mapsplatformdatasets
   cloud_api: false
   GAPICs:
   - proto_path: google/maps/mapsplatformdatasets/v1
-
 - api_shortname: maps-places
   name_pretty: Places API (New)
   product_documentation: https://developers.google.com/maps/documentation/places/web-service/
@@ -1642,15 +1528,13 @@ libraries:
   cloud_api: false
   GAPICs:
   - proto_path: google/maps/places/v1
-
 - api_shortname: routeoptimization
   name_pretty: Route Optimization API
   product_documentation: https://developers.google.com/maps/documentation/route-optimization
   api_description: The Route Optimization API assigns tasks and routes to a vehicle
     fleet, optimizing against the objectives and constraints that you supply for
     your transportation goals.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-maps-routeoptimization/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-maps-routeoptimization/latest/overview
   release_level: preview
   distribution_name: com.google.maps:google-maps-routeoptimization
   api_id: routeoptimization.googleapis.com
@@ -1661,10 +1545,8 @@ libraries:
   - proto_path: google/maps/routeoptimization/v1
   library_name: maps-routeoptimization
   requires_billing: true
-  rest_documentation:
-    https://developers.google.com/maps/documentation/route-optimization/reference/rest/
-  rpc_documentation:
-    https://developers.google.com/maps/documentation/route-optimization/reference/rpc
+  rest_documentation: https://developers.google.com/maps/documentation/route-optimization/reference/rest/
+  rpc_documentation: https://developers.google.com/maps/documentation/route-optimization/reference/rpc
 - api_shortname: maps-routing
   name_pretty: Routes API
   product_documentation: https://developers.google.com/maps/documentation/routes
@@ -1678,7 +1560,6 @@ libraries:
   cloud_api: false
   GAPICs:
   - proto_path: google/maps/routing/v2
-
 - api_shortname: maps-solar
   name_pretty: Solar API
   product_documentation: https://developers.google.com/maps/documentation/solar/overview
@@ -1722,7 +1603,6 @@ libraries:
   requires_billing: false
   GAPICs:
   - proto_path: google/cloud/mediatranslation/v1beta1
-
 - api_shortname: meet
   name_pretty: Google Meet API
   product_documentation: https://developers.google.com/meet/api/guides/overview
@@ -1731,7 +1611,6 @@ libraries:
   GAPICs:
   - proto_path: google/apps/meet/v2
   - proto_path: google/apps/meet/v2beta
-
 - api_shortname: memcache
   name_pretty: Cloud Memcache
   product_documentation: https://cloud.google.com/memorystore/
@@ -1741,7 +1620,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/memcache/v1
   - proto_path: google/cloud/memcache/v1beta2
-
 - api_shortname: migrationcenter
   name_pretty: Migration Center API
   product_documentation: https://cloud.google.com/migration-center/docs/migration-center-overview
@@ -1750,15 +1628,13 @@ libraries:
   cloud environments to Google Cloud
   GAPICs:
   - proto_path: google/cloud/migrationcenter/v1
-
 - api_shortname: modelarmor
   name_pretty: Model Armor API
   product_documentation: https://cloud.google.com/security-command-center/docs/model-armor-overview
   api_description: Model Armor helps you protect against risks like prompt injection,
     harmful content, and data leakage in generative AI applications by letting you
     define policies that filter user prompts and model responses.
-  client_documentation:
-    https://cloud.google.com/java/docs/reference/google-cloud-modelarmor/latest/overview
+  client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-modelarmor/latest/overview
   release_level: preview
   distribution_name: com.google.cloud:google-cloud-modelarmor
   api_id: modelarmor.googleapis.com
@@ -1782,7 +1658,6 @@ libraries:
   issue_tracker: https://issuetracker.google.com/savedsearches/559785
   GAPICs:
   - proto_path: google/monitoring/v3
-
 - api_shortname: monitoring-dashboards
   name_pretty: Monitoring Dashboards
   product_documentation: https://cloud.google.com/monitoring/charts/dashboards
@@ -1795,18 +1670,15 @@ libraries:
   api_id: monitoring.googleapis.com
   GAPICs:
   - proto_path: google/monitoring/dashboard/v1
-
 - api_shortname: monitoring-metricsscope
   name_pretty: Monitoring Metrics Scopes
-  product_documentation:
-    https://cloud.google.com/monitoring/api/ref_v3/rest/v1/locations.global.metricsScopes
+  product_documentation: https://cloud.google.com/monitoring/api/ref_v3/rest/v1/locations.global.metricsScopes
   api_description: The metrics scope defines the set of Google Cloud projects whose
     metrics the current Google Cloud project can access.
   api_id: monitoring.googleapis.com
   distribution_name: com.google.cloud:google-cloud-monitoring-metricsscope
   GAPICs:
   - proto_path: google/monitoring/metricsscope/v1
-
 - api_shortname: netapp
   name_pretty: NetApp API
   product_documentation: https://cloud.google.com/netapp/volumes/docs/discover/overview
@@ -1815,11 +1687,9 @@ libraries:
   scalable performance with global availability.
   GAPICs:
   - proto_path: google/cloud/netapp/v1
-
 - api_shortname: networkmanagement
   name_pretty: Network Management API
-  product_documentation:
-    https://cloud.google.com/network-intelligence-center/docs/connectivity-tests/reference/networkmanagement/rest/
+  product_documentation: https://cloud.google.com/network-intelligence-center/docs/connectivity-tests/reference/networkmanagement/rest/
   api_description: provides a collection of network performance monitoring and diagnostic
     capabilities.
   library_name: network-management
@@ -1827,7 +1697,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/networkmanagement/v1
   - proto_path: google/cloud/networkmanagement/v1beta1
-
 - api_shortname: networksecurity
   name_pretty: Network Security API
   product_documentation: https://cloud.google.com/traffic-director/docs/reference/network-security/rest
@@ -1837,7 +1706,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/networksecurity/v1
   - proto_path: google/cloud/networksecurity/v1beta1
-
 - api_shortname: networkconnectivity
   name_pretty: Network Connectivity Center
   product_documentation: https://cloud.google.com/network-connectivity/docs
@@ -1848,7 +1716,6 @@ libraries:
   GAPICs:
   - proto_path: google/cloud/networkconnectivity/v1
   - proto_path: google/cloud/networkconnectivity/v1alpha1
-
 - api_shortname: networkservices
   name_pretty: Network Services API
   product_documentation: https://cloud.google.com/products/networking
   api_description: delivers networking services
     on top of planet-scale infrastructure that leverages automation, advanced AI,
     and programmability, enabling enterprises to connect, scale, secure, modernize
     and optimize their infrastructure.
- client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-networkservices/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-networkservices/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-networkservices api_id: networkservices.googleapis.com @@ -1880,7 +1746,6 @@ libraries: - proto_path: google/cloud/notebooks/v1 - proto_path: google/cloud/notebooks/v1beta1 - proto_path: google/cloud/notebooks/v2 - - api_shortname: cloudoptimization name_pretty: Cloud Fleet Routing product_documentation: https://cloud.google.com/optimization/docs @@ -1893,14 +1758,12 @@ libraries: rpc_documentation: https://cloud.google.com/optimization/docs/reference/rpc GAPICs: - proto_path: google/cloud/optimization/v1 - - api_shortname: oracledatabase name_pretty: Oracle Database@Google Cloud API product_documentation: https://cloud.google.com/oracle/database/docs api_description: The Oracle Database@Google Cloud API provides a set of APIs to manage Oracle database services, such as Exadata and Autonomous Databases. - client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-oracledatabase/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-oracledatabase/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-oracledatabase api_id: oracledatabase.googleapis.com @@ -1925,18 +1788,15 @@ libraries: GAPICs: - proto_path: google/cloud/orchestration/airflow/service/v1 - proto_path: google/cloud/orchestration/airflow/service/v1beta1 - - api_shortname: orgpolicy name_pretty: Cloud Organization Policy product_documentation: n/a api_description: n/a release_level: stable - client_documentation: - https://cloud.google.com/java/docs/reference/proto-google-cloud-orgpolicy-v1/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/proto-google-cloud-orgpolicy-v1/latest/overview GAPICs: - proto_path: google/cloud/orgpolicy/v1 - proto_path: google/cloud/orgpolicy/v2 - - api_shortname: osconfig name_pretty: OS Config API product_documentation: https://cloud.google.com/compute/docs/os-patch-management @@ -1950,7 +1810,6 @@ libraries: - proto_path: google/cloud/osconfig/v1 - proto_path: google/cloud/osconfig/v1alpha - proto_path: google/cloud/osconfig/v1beta - - api_shortname: oslogin name_pretty: Cloud OS Login product_documentation: https://cloud.google.com/compute/docs/oslogin/ @@ -1961,14 +1820,12 @@ libraries: GAPICs: - proto_path: google/cloud/oslogin/v1 - proto_path: google/cloud/oslogin/v1beta - - api_shortname: parallelstore name_pretty: Parallelstore API product_documentation: https://cloud.google.com/parallelstore?hl=en api_description: 'Parallelstore is based on Intel DAOS and delivers up to 6.3x greater read throughput performance compared to competitive Lustre scratch offerings. ' - client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-parallelstore/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-parallelstore/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-parallelstore api_id: parallelstore.googleapis.com @@ -1986,8 +1843,7 @@ libraries: to store, access and manage the lifecycle of your workload parameters. Parameter Manager aims to make management of sensitive application parameters effortless for customers without diminishing focus on security.
- client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-parametermanager/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-parametermanager/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-parametermanager api_id: parametermanager.googleapis.com @@ -2009,7 +1865,6 @@ libraries: requires_billing: false GAPICs: - proto_path: google/cloud/phishingprotection/v1beta1 - - api_shortname: policytroubleshooter name_pretty: IAM Policy Troubleshooter API product_documentation: https://cloud.google.com/iam/docs/troubleshooting-access @@ -2025,7 +1880,6 @@ libraries: GAPICs: - proto_path: google/cloud/policytroubleshooter/v1 - proto_path: google/cloud/policytroubleshooter/iam/v3 - - api_shortname: policysimulator name_pretty: Policy Simulator API product_documentation: https://cloud.google.com/policysimulator/docs/overview @@ -2033,7 +1887,6 @@ libraries: and viewing a Replay. GAPICs: - proto_path: google/cloud/policysimulator/v1 - - api_shortname: cloudprivatecatalog name_pretty: Private Catalog product_documentation: https://cloud.google.com/private-catalog/docs @@ -2044,11 +1897,9 @@ libraries: api_id: privatecatalog.googleapis.com GAPICs: - proto_path: google/cloud/privatecatalog/v1beta1 - - api_shortname: privilegedaccessmanager name_pretty: Privileged Access Manager API - product_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-privilegedaccessmanager/latest/overview + product_documentation: https://cloud.google.com/java/docs/reference/google-cloud-privilegedaccessmanager/latest/overview api_description: Privileged Access Manager (PAM) helps you on your journey towards least privilege and helps mitigate risks tied to privileged access misuse or abuse. PAM allows you to shift from always-on standing privileges towards on-demand access @@ -2061,8 +1912,7 @@ libraries: access for developers for critical deployment or maintenance, temporary access for operators for data ingestion and audits, JIT access to service accounts for automated tasks, and more.
- client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-privilegedaccessmanager/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-privilegedaccessmanager/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-privilegedaccessmanager api_id: privilegedaccessmanager.googleapis.com @@ -2086,7 +1936,6 @@ libraries: api_id: cloudprofiler.googleapis.com GAPICs: - proto_path: google/devtools/cloudprofiler/v2 - - api_shortname: publicca name_pretty: Public Certificate Authority API product_documentation: https://cloud.google.com/certificate-manager/docs/public-ca @@ -2098,14 +1947,12 @@ libraries: GAPICs: - proto_path: google/cloud/security/publicca/v1beta1 - proto_path: google/cloud/security/publicca/v1 - - api_shortname: rapidmigrationassessment name_pretty: Rapid Migration Assessment API product_documentation: https://cloud.google.com/migration-center/docs api_description: Rapid Migration Assessment API GAPICs: - proto_path: google/cloud/rapidmigrationassessment/v1 - - api_shortname: recaptchaenterprise name_pretty: reCAPTCHA Enterprise product_documentation: https://cloud.google.com/recaptcha-enterprise/docs/ @@ -2118,7 +1965,6 @@ libraries: GAPICs: - proto_path: google/cloud/recaptchaenterprise/v1 - proto_path: google/cloud/recaptchaenterprise/v1beta1 - - api_shortname: recommendationengine name_pretty: Recommendations AI product_documentation: https://cloud.google.com/recommendations-ai/ @@ -2126,7 +1972,6 @@ libraries: library_name: recommendations-ai GAPICs: - proto_path: google/cloud/recommendationengine/v1beta1 - - api_shortname: recommender name_pretty: Recommender product_documentation: https://cloud.google.com/recommendations/ @@ -2136,7 +1981,6 @@ libraries: GAPICs: - proto_path: google/cloud/recommender/v1 - proto_path: google/cloud/recommender/v1beta1 - - api_shortname: redis name_pretty: Cloud Redis product_documentation: https://cloud.google.com/memorystore/docs/redis/ @@ -2149,7 +1993,6 @@ libraries: GAPICs: - proto_path: google/cloud/redis/v1 - proto_path: google/cloud/redis/v1beta1 - - api_shortname: redis-cluster name_pretty: Google Cloud Memorystore for Redis API product_documentation: https://cloud.google.com/memorystore/docs/cluster @@ -2157,7 +2000,6 @@ libraries: GAPICs: - proto_path: google/cloud/redis/cluster/v1 - proto_path: google/cloud/redis/cluster/v1beta1 - - api_shortname: cloudresourcemanager name_pretty: Resource Manager API product_documentation: https://cloud.google.com/resource-manager @@ -2169,7 +2011,6 @@ libraries: issue_tracker: https://issuetracker.google.com/savedsearches/559757 GAPICs: - proto_path: google/cloud/resourcemanager/v3 - - api_shortname: retail name_pretty: Cloud Retail product_documentation: https://cloud.google.com/solutions/retail @@ -2179,7 +2020,6 @@ libraries: - proto_path: google/cloud/retail/v2 - proto_path: google/cloud/retail/v2alpha - proto_path: google/cloud/retail/v2beta - - api_shortname: run name_pretty: Cloud Run product_documentation: https://cloud.google.com/run/docs @@ -2189,13 +2029,11 @@ libraries: rpc_documentation: https://cloud.google.com/run/docs/reference/rpc GAPICs: - proto_path: google/cloud/run/v2 - - api_shortname: saasservicemgmt name_pretty: SaaS Runtime API product_documentation: https://cloud.google.com/saas-runtime/docs/overview api_description: "Model, deploy, and operate your SaaS at scale.\t" - client_documentation: - 
https://cloud.google.com/java/docs/reference/google-cloud-saasservicemgmt/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-saasservicemgmt/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-saasservicemgmt api_id: saasservicemgmt.googleapis.com @@ -2223,7 +2061,6 @@ libraries: GAPICs: - proto_path: google/cloud/scheduler/v1 - proto_path: google/cloud/scheduler/v1beta1 - - api_shortname: secretmanager name_pretty: Secret Management product_documentation: https://cloud.google.com/solutions/secrets-management/ @@ -2234,18 +2071,15 @@ libraries: GAPICs: - proto_path: google/cloud/secretmanager/v1 - proto_path: google/cloud/secretmanager/v1beta2 - # v1beta1's version is in a different proto path from the rest - proto_path: google/cloud/secrets/v1beta1 - - api_shortname: securesourcemanager name_pretty: Secure Source Manager API product_documentation: https://cloud.google.com/secure-source-manager/docs/overview - api_description: "Regionally deployed, single-tenant managed source code repository - hosted on\n Google Cloud." + api_description: "Regionally deployed, single-tenant managed source code repository\ + \ hosted on\n Google Cloud." release_level: stable GAPICs: - proto_path: google/cloud/securesourcemanager/v1 - - api_shortname: privateca name_pretty: Certificate Authority Service product_documentation: https://cloud.google.com/certificate-authority-service/docs @@ -2259,8 +2093,6 @@ libraries: GAPICs: - proto_path: google/cloud/security/privateca/v1 - proto_path: google/cloud/security/privateca/v1beta1 - -# duplicated api_shortname - api_shortname: securitycenter name_pretty: Security Command Center product_documentation: https://cloud.google.com/security-command-center @@ -2280,7 +2112,6 @@ libraries: - proto_path: google/cloud/securitycenter/v1beta1 - proto_path: google/cloud/securitycenter/v1p1beta1 - proto_path: google/cloud/securitycenter/v2 - - api_shortname: securitycenter name_pretty: Security Command Center Settings API product_documentation: https://cloud.google.com/security-command-center/ @@ -2293,7 +2124,6 @@ libraries: rest_documentation: https://cloud.google.com/security-command-center/docs/reference/rest GAPICs: - proto_path: google/cloud/securitycenter/settings/v1beta1 - - api_shortname: securitycentermanagement name_pretty: Security Center Management API product_documentation: https://cloud.google.com/securitycentermanagement/docs/overview @@ -2301,7 +2131,6 @@ libraries: release_level: stable GAPICs: - proto_path: google/cloud/securitycentermanagement/v1 - - api_shortname: securityposture name_pretty: Security Posture API product_documentation: https://cloud.google.com/security-command-center/docs/security-posture-overview @@ -2312,7 +2141,6 @@ libraries: release_level: stable GAPICs: - proto_path: google/cloud/securityposture/v1 - - api_shortname: servicecontrol name_pretty: Service Control API product_documentation: https://cloud.google.com/service-infrastructure/docs/overview/ @@ -2324,7 +2152,6 @@ libraries: GAPICs: - proto_path: google/api/servicecontrol/v1 - proto_path: google/api/servicecontrol/v2 - - api_shortname: servicemanagement name_pretty: Service Management API product_documentation: https://cloud.google.com/service-infrastructure/docs/overview/ @@ -2338,7 +2165,6 @@ libraries: api_id: servicemanagement.googleapis.com GAPICs: - proto_path: google/api/servicemanagement/v1 - - api_shortname: serviceusage name_pretty: Service Usage product_documentation: 
https://cloud.google.com/service-usage/docs/overview @@ -2349,7 +2175,6 @@ libraries: GAPICs: - proto_path: google/api/serviceusage/v1 - proto_path: google/api/serviceusage/v1beta1 - - api_shortname: servicedirectory name_pretty: Service Directory product_documentation: https://cloud.google.com/service-directory/ @@ -2361,7 +2186,6 @@ libraries: GAPICs: - proto_path: google/cloud/servicedirectory/v1 - proto_path: google/cloud/servicedirectory/v1beta1 - - api_shortname: servicehealth name_pretty: Service Health API product_documentation: https://cloud.google.com/service-health/docs/overview @@ -2370,7 +2194,6 @@ libraries: rpc_documentation: https://cloud.google.com/service-health/docs/reference/rpc GAPICs: - proto_path: google/cloud/servicehealth/v1 - - api_shortname: cloudshell name_pretty: Cloud Shell product_documentation: https://cloud.google.com/shell/docs @@ -2384,7 +2207,6 @@ libraries: rpc_documentation: https://cloud.google.com/shell/docs/reference/rpc GAPICs: - proto_path: google/cloud/shell/v1 - - api_shortname: css name_pretty: CSS API product_documentation: https://developers.google.com/comparison-shopping-services/api @@ -2395,14 +2217,11 @@ libraries: distribution_name: com.google.shopping:google-shopping-css GAPICs: - proto_path: google/shopping/css/v1 - - # duplicated api_shortname - api_shortname: merchantapi name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-accounts/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-accounts/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-accounts api_id: merchantapi.googleapis.com @@ -2418,8 +2237,7 @@ libraries: name_pretty: Merchant Conversions API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-conversions/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-conversions/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-conversions api_id: shopping-merchant-conversions.googleapis.com @@ -2430,13 +2248,11 @@ libraries: - proto_path: google/shopping/merchant/conversions/v1 - proto_path: google/shopping/merchant/conversions/v1beta requires_billing: true - - api_shortname: merchantapi name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-datasources/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-datasources/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-datasources api_id: merchantapi.googleapis.com @@ -2463,8 +2279,7 @@ libraries: name_pretty: Merchant LFP API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. 
- client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-lfp/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-lfp/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-lfp api_id: shopping-merchant-lfp.googleapis.com @@ -2475,13 +2290,11 @@ libraries: - proto_path: google/shopping/merchant/lfp/v1 - proto_path: google/shopping/merchant/lfp/v1beta requires_billing: true - - api_shortname: shopping-merchant-notifications name_pretty: Merchant Notifications API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-notifications/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-notifications/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-notifications api_id: shopping-merchant-notifications.googleapis.com @@ -2496,8 +2309,7 @@ libraries: name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your products. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-productstudio/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-productstudio/latest/overview release_level: preview distribution_name: com.google.shopping:google-shopping-merchant-productstudio api_id: merchantapi.googleapis.com @@ -2512,8 +2324,7 @@ libraries: name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-products/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-products/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-products api_id: merchantapi.googleapis.com @@ -2528,8 +2339,7 @@ libraries: name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-promotions/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-promotions/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-promotions api_id: merchantapi.googleapis.com @@ -2545,8 +2355,7 @@ libraries: name_pretty: Merchant Quota API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center accounts. 
- client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-quota/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-quota/latest/overview release_level: stable distribution_name: com.google.shopping:google-shopping-merchant-quota api_id: shopping-merchant-quota.googleapis.com @@ -2557,7 +2366,6 @@ libraries: - proto_path: google/shopping/merchant/quota/v1 - proto_path: google/shopping/merchant/quota/v1beta requires_billing: true - - api_shortname: merchantapi name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api @@ -2570,13 +2378,11 @@ libraries: - proto_path: google/shopping/merchant/reports/v1 - proto_path: google/shopping/merchant/reports/v1beta - proto_path: google/shopping/merchant/reports/v1alpha - - api_shortname: merchantapi name_pretty: Merchant API product_documentation: https://developers.google.com/merchant/api api_description: Programmatically manage your Merchant Center Accounts. - client_documentation: - https://cloud.google.com/java/docs/reference/google-shopping-merchant-reviews/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-shopping-merchant-reviews/latest/overview release_level: preview distribution_name: com.google.shopping:google-shopping-merchant-reviews library_type: GAPIC_AUTO @@ -2586,16 +2392,13 @@ libraries: - proto_path: google/shopping/merchant/reviews/v1beta requires_billing: true library_name: shopping-merchant-reviews - - api_shortname: spanneradapter name_pretty: Cloud Spanner Adapter API - product_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-spanneradapter/latest/overview + product_documentation: https://cloud.google.com/java/docs/reference/google-cloud-spanneradapter/latest/overview api_description: The Cloud Spanner Adapter service allows native drivers of supported database dialects to interact directly with Cloud Spanner by wrapping the underlying wire protocol used by the driver in a gRPC stream. - client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-spanneradapter/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-spanneradapter/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-spanneradapter api_id: spanneradapter.googleapis.com @@ -2620,7 +2423,6 @@ libraries: - proto_path: google/cloud/speech/v1 - proto_path: google/cloud/speech/v1p1beta1 - proto_path: google/cloud/speech/v2 - - api_shortname: storagetransfer name_pretty: Storage Transfer Service product_documentation: https://cloud.google.com/storage-transfer-service @@ -2630,15 +2432,13 @@ libraries: release_level: stable GAPICs: - proto_path: google/storagetransfer/v1 - - api_shortname: storagebatchoperations name_pretty: Storage Batch Operations API product_documentation: https://cloud.google.com/storage/docs/batch-operations/overview api_description: Storage batch operations is a Cloud Storage management feature that performs operations on billions of Cloud Storage objects in a serverless manner. 
- client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-storagebatchoperations/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-storagebatchoperations/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-storagebatchoperations api_id: storagebatchoperations.googleapis.com @@ -2654,7 +2454,6 @@ libraries: api_description: Provides insights capability on Google Cloud Storage GAPICs: - proto_path: google/cloud/storageinsights/v1 - - api_shortname: jobs name_pretty: Talent Solution product_documentation: https://cloud.google.com/solutions/talent-solution/ @@ -2670,7 +2469,6 @@ libraries: GAPICs: - proto_path: google/cloud/talent/v4 - proto_path: google/cloud/talent/v4beta1 - - api_shortname: cloudtasks name_pretty: Cloud Tasks product_documentation: https://cloud.google.com/tasks/docs/ @@ -2688,7 +2486,6 @@ libraries: - proto_path: google/cloud/tasks/v2 - proto_path: google/cloud/tasks/v2beta2 - proto_path: google/cloud/tasks/v2beta3 - - api_shortname: telcoautomation name_pretty: Telco Automation API product_documentation: https://cloud.google.com/telecom-network-automation @@ -2698,7 +2495,6 @@ libraries: GAPICs: - proto_path: google/cloud/telcoautomation/v1 - proto_path: google/cloud/telcoautomation/v1alpha1 - - api_shortname: texttospeech name_pretty: Cloud Text-to-Speech product_documentation: https://cloud.google.com/text-to-speech @@ -2713,7 +2509,6 @@ libraries: GAPICs: - proto_path: google/cloud/texttospeech/v1 - proto_path: google/cloud/texttospeech/v1beta1 - - api_shortname: tpu name_pretty: Cloud TPU product_documentation: https://cloud.google.com/tpu/docs @@ -2725,7 +2520,6 @@ libraries: - proto_path: google/cloud/tpu/v1 - proto_path: google/cloud/tpu/v2 - proto_path: google/cloud/tpu/v2alpha1 - - api_shortname: cloudtrace name_pretty: Stackdriver Trace product_documentation: https://cloud.google.com/trace/docs/ @@ -2739,7 +2533,6 @@ libraries: GAPICs: - proto_path: google/devtools/cloudtrace/v1 - proto_path: google/devtools/cloudtrace/v2 - - api_shortname: translate name_pretty: Cloud Translation product_documentation: https://cloud.google.com/translate/docs/ @@ -2754,14 +2547,12 @@ libraries: GAPICs: - proto_path: google/cloud/translate/v3 - proto_path: google/cloud/translate/v3beta1 - - api_shortname: memorystore name_pretty: Memorystore API product_documentation: https://cloud.google.com/memorystore/docs/valkey api_description: Memorystore for Valkey is a fully managed Valkey Cluster service for Google Cloud. - client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-memorystore/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-memorystore/latest/overview release_level: stable api_id: memorystore.googleapis.com library_type: GAPIC_AUTO @@ -2784,8 +2575,7 @@ libraries: with associated metadata, automatically generate embeddings from your data, and perform fast approximate nearest neighbor (ANN) searches to find semantically similar items at scale. 
- client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-vectorsearch/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-vectorsearch/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-vectorsearch api_id: vectorsearch.googleapis.com @@ -2811,7 +2601,6 @@ libraries: - proto_path: google/cloud/videointelligence/v1p1beta1 - proto_path: google/cloud/videointelligence/v1p2beta1 - proto_path: google/cloud/videointelligence/v1p3beta1 - - api_shortname: livestream name_pretty: Live Stream API product_documentation: https://cloud.google.com/livestream/ @@ -2822,7 +2611,6 @@ libraries: distribution_name: com.google.cloud:google-cloud-live-stream GAPICs: - proto_path: google/cloud/video/livestream/v1 - - api_shortname: videostitcher name_pretty: Video Stitcher API product_documentation: https://cloud.google.com/video-stitcher/ @@ -2832,7 +2620,6 @@ libraries: release_level: stable GAPICs: - proto_path: google/cloud/video/stitcher/v1 - - api_shortname: transcoder name_pretty: Video Transcoder product_documentation: https://cloud.google.com/transcoder/docs @@ -2846,7 +2633,6 @@ libraries: rpc_documentation: https://cloud.google.com/transcoder/docs/reference/rpc GAPICs: - proto_path: google/cloud/video/transcoder/v1 - - api_shortname: vision name_pretty: Cloud Vision product_documentation: https://cloud.google.com/vision/docs/ @@ -2863,14 +2649,12 @@ libraries: - proto_path: google/cloud/vision/v1p2beta1 - proto_path: google/cloud/vision/v1p3beta1 - proto_path: google/cloud/vision/v1p4beta1 - - api_shortname: visionai name_pretty: Vision AI API product_documentation: https://cloud.google.com/vision-ai/docs api_description: Vertex AI Vision is an AI-powered platform to ingest, analyze and store video data. - client_documentation: - https://cloud.google.com/java/docs/reference/google-cloud-visionai/latest/overview + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-visionai/latest/overview release_level: preview distribution_name: com.google.cloud:google-cloud-visionai api_id: visionai.googleapis.com @@ -2890,7 +2674,6 @@ libraries: release_level: stable GAPICs: - proto_path: google/cloud/vmmigration/v1 - - api_shortname: vmwareengine name_pretty: Google Cloud VMware Engine product_documentation: https://cloud.google.com/vmware-engine/ @@ -2899,7 +2682,6 @@ libraries: rest_documentation: https://cloud.google.com/vmware-engine/docs/reference/rest GAPICs: - proto_path: google/cloud/vmwareengine/v1 - - api_shortname: vpcaccess name_pretty: Serverless VPC Access product_documentation: https://cloud.google.com/vpc/docs/serverless-vpc-access @@ -2910,18 +2692,17 @@ libraries: release_level: stable GAPICs: - proto_path: google/cloud/vpcaccess/v1 - - api_shortname: webrisk name_pretty: Web Risk product_documentation: https://cloud.google.com/web-risk/docs/ - api_description: is a Google Cloud service that lets client applications check URLs - against Google's constantly updated lists of unsafe web resources. Unsafe web - resources include social engineering sites—such as phishing and deceptive sites—and - sites that host malware or unwanted software. With the Web Risk API, you can quickly - identify known bad sites, warn users before they click infected links, and prevent - users from posting links to known infected pages from your site. The Web Risk - API includes data on more than a million unsafe URLs and stays up to date by examining - billions of URLs each day. 
+ api_description: "is a Google Cloud service that lets client applications check\ + \ URLs against Google's constantly updated lists of unsafe web resources. Unsafe\ + \ web resources include social engineering sites\u2014such as phishing and deceptive\ + \ sites\u2014and sites that host malware or unwanted software. With the Web Risk\ + \ API, you can quickly identify known bad sites, warn users before they click\ + \ infected links, and prevent users from posting links to known infected pages\ + \ from your site. The Web Risk API includes data on more than a million unsafe\ + \ URLs and stays up to date by examining billions of URLs each day." release_level: stable requires_billing: false issue_tracker: '' @@ -2930,7 +2711,6 @@ libraries: GAPICs: - proto_path: google/cloud/webrisk/v1 - proto_path: google/cloud/webrisk/v1beta1 - - api_shortname: websecurityscanner name_pretty: Cloud Security Scanner product_documentation: https://cloud.google.com/security-scanner/docs/ @@ -2945,7 +2725,6 @@ libraries: - proto_path: google/cloud/websecurityscanner/v1 - proto_path: google/cloud/websecurityscanner/v1alpha - proto_path: google/cloud/websecurityscanner/v1beta - - api_shortname: workflowexecutions name_pretty: Cloud Workflow Executions product_documentation: https://cloud.google.com/workflows @@ -2958,7 +2737,6 @@ libraries: GAPICs: - proto_path: google/cloud/workflows/executions/v1 - proto_path: google/cloud/workflows/executions/v1beta - - api_shortname: workflows name_pretty: Cloud Workflows product_documentation: https://cloud.google.com/workflows @@ -2970,7 +2748,6 @@ libraries: GAPICs: - proto_path: google/cloud/workflows/v1 - proto_path: google/cloud/workflows/v1beta - - api_shortname: workspaceevents name_pretty: Google Workspace Events API product_documentation: https://developers.google.com/workspace/events @@ -2980,7 +2757,6 @@ libraries: GAPICs: - proto_path: google/apps/events/subscriptions/v1 - proto_path: google/apps/events/subscriptions/v1beta - - api_shortname: workstations name_pretty: Cloud Workstations product_documentation: https://cloud.google.com/workstations diff --git a/java-bigquerystorage/.OwlBot-hermetic.yaml b/java-bigquerystorage/.OwlBot-hermetic.yaml new file mode 100644 index 000000000000..54a72ded4010 --- /dev/null +++ b/java-bigquerystorage/.OwlBot-hermetic.yaml @@ -0,0 +1,98 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +deep-remove-regex: +- /java-bigquerystorage/grpc-google-.*/src +- /java-bigquerystorage/proto-google-.*/src +- /java-bigquerystorage/google-.*/src +deep-preserve-regex: +- /java-bigquerystorage/google-.*/src/test/java/com/google/cloud/.*/v.*/it/IT.*Test.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/AppendFormats.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtil.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/RequestProfiler.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/TelemetryMetrics.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v.*/BigQueryReadSettings.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v.*/BigQueryReadClient.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v.*/stub/BigQueryReadStubSettings.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v.*/stub/EnhancedBigQueryReadStub.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v.*/stub/EnhancedBigQueryReadStubSettings.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v.*/stub/readrows +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v.*/stub +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v.*/it +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v.*/BigQueryReadClientTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java +- 
/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/BQTableSchemaToProtoDescriptorTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/BQV2ToBQStorageConverterTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/BigDecimalByteStringEncoderTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/BigQueryReadClientTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/CivilTimeEncoderTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/FakeBigQueryWrite.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/FakeBigQueryWriteImpl.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/FakeClock.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/FakeScheduledExecutorService.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/JsonStreamWriterTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/JsonToProtoMessageTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/ProtoSchemaConverterTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/RequestProfilerTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/SchemaCompatibilityTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/StreamWriterTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/StreamWriterV2Test.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/RequestProfilerTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/it/BigQueryResource.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/it/SimpleRowReader.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/stub/EnhancedBigQueryReadStubSettingsTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/stub/ResourceHeaderTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1.*/stub/readrows/ReadRowsRetryTest.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClient.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageSettings.java +- 
/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStub.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettings.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/BQTableSchemaToProtoDescriptor.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/BQV2ToBQStorageConverter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/BigDecimalByteStringEncoder.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/CivilTimeEncoder.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/JsonStreamWriter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/JsonToProtoMessage.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/OnSchemaUpdateRunnable.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/ProtoSchemaConverter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/SchemaCompatibility.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/StreamConnection.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/StreamWriter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/StreamWriterV2.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1.*/Waiter.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Singletons.java +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json +- /java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json +deep-copy-regex: +- source: /google/cloud/bigquery/storage/(v.*)/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-bigquerystorage/$1/proto-google-cloud-bigquerystorage-$1/src +- source: /google/cloud/bigquery/storage/(v.*)/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-bigquerystorage/$1/grpc-google-cloud-bigquerystorage-$1/src +- source: /google/cloud/bigquery/storage/(v.*)/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-bigquerystorage/$1/google-cloud-bigquerystorage/src diff --git a/java-bigquerystorage/.cloudbuild/samples_build.yaml b/java-bigquerystorage/.cloudbuild/samples_build.yaml new file mode 100644 index 000000000000..333515cfd7f8 --- /dev/null +++ 
b/java-bigquerystorage/.cloudbuild/samples_build.yaml @@ -0,0 +1,32 @@ +steps: +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: ls + args: [ + '-alt', + ] +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: curl + args: [ + '--header', + 'Metadata-Flavor: Google', + 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email' + ] +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: pwd +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: bash + args: [ + '.kokoro/build.sh' + ] + env: + - 'JOB_TYPE=samples' + - 'GOOGLE_CLOUD_PROJECT=cloud-java-ci-sample' + - 'BIGTABLE_TESTING_INSTANCE=instance' +- name: gcr.io/cloud-devrel-public-resources/java8 + entrypoint: echo + args: [ + 'Sample job succeeded', + ] +timeout: 3600s +options: + defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET diff --git a/java-bigquerystorage/.readme-partials.yaml b/java-bigquerystorage/.readme-partials.yaml new file mode 100644 index 000000000000..c9416e4a266e --- /dev/null +++ b/java-bigquerystorage/.readme-partials.yaml @@ -0,0 +1,30 @@ +custom_content: | + ## OpenTelemetry support + The client supports emitting metrics to OpenTelemetry. This is disabled by default. It can be enabled by calling + ``` + JsonStreamWriter.Builder.setEnableOpenTelemetry(true) + ``` + The following metric attributes are supported. + | Key | Value | + |-----------------|------------------------------------------------------------------------------------------------------------------------------------| + | `error_code` | Specifies error code in the event an append request fails, or a connection ends. | + | `is_retry` | Indicates this was a retry operation. This can be set for either ack’ed requests or connection retry attempts. | + | `table_id` | Holds fully qualified name of destination table | + | `trace_field_1` | If a colon-separated traceId is provided, this holds the first portion. Must be non-empty. Currently populated only for Dataflow. | + | `trace_field_2` | If a colon-separated traceId is provided, this holds the second portion. Must be non-empty. Currently populated only for Dataflow. | + | `trace_field_3` | If a colon-separated traceId is provided, this holds the third portion. Must be non-empty. Currently populated only for Dataflow. | + | `writer_id` | Specifies writer instance id. | + The following metrics are supported. + | Name | Kind | Description | + |------------------------------|---------------------|------------------------------------------------------------------------------------------------------------------| + | `active_connection_count` | Asynchronous gauge | Reports number of active connections | + | `append_requests_acked` | Synchronous counter | Counts number of requests acked by the server | + | `append_request_bytes_acked` | Synchronous counter | Counts byte size of requests acked by the server | + | `append_rows_acked` | Synchronous counter | Counts number of rows in requests acked by the server | + | `connection_end_count` | Synchronous counter | Counts number of connection end events. This is decorated with the error code. | + | `connection_start_count` | Synchronous counter | Counts number of connection attempts made, regardless of whether these are initial or retry. | + | `inflight_queue_length` | Asynchronous gauge | Reports length of inflight queue. This queue contains sent append requests waiting for response from the server. 
| + | `network_response_latency` | Histogram | Reports time taken in milliseconds for a response to arrive once a message has been sent over the network. | + ### Exporting OpenTelemetry metrics + An exporter or collector must be installed by the application in order for [OpenTelemetry metrics to be captured](https://opentelemetry.io/docs/concepts/components/#exporters). + The [sample application](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/test/java/com/example/bigquerystorage/ExportOpenTelemetryIT.java) uses [Google Monitoring Metrics Exporter](https://github.com/GoogleCloudPlatform/opentelemetry-operations-java/tree/main/exporters/metrics) to export metrics to a Google Cloud project. diff --git a/java-bigquerystorage/.repo-metadata.json b/java-bigquerystorage/.repo-metadata.json new file mode 100644 index 000000000000..d1243ff6695f --- /dev/null +++ b/java-bigquerystorage/.repo-metadata.json @@ -0,0 +1,19 @@ +{ + "api_shortname": "bigquerystorage", + "name_pretty": "BigQuery Storage", + "product_documentation": "https://cloud.google.com/bigquery/docs/reference/storage/", + "api_description": "is an API for reading data stored in BigQuery. This API provides direct, high-throughput read access to existing BigQuery tables, supports parallel access with automatic liquid sharding, and allows fine-grained control over what data is returned.", + "client_documentation": "https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/history", + "release_level": "stable", + "transport": "grpc", + "language": "java", + "repo": "googleapis/google-cloud-java", + "repo_short": "java-bigquerystorage", + "distribution_name": "com.google.cloud:google-cloud-bigquerystorage", + "api_id": "bigquerystorage.googleapis.com", + "library_type": "GAPIC_COMBO", + "requires_billing": true, + "codeowner_team": "@googleapis/api-bigquery", + "issue_tracker": "https://issuetracker.google.com/savedsearches/559654", + "recommended_package": "com.google.cloud.bigquery.storage.v1" +} \ No newline at end of file diff --git a/java-bigquerystorage/CHANGELOG.md b/java-bigquerystorage/CHANGELOG.md new file mode 100644 index 000000000000..8601208489b2 --- /dev/null +++ b/java-bigquerystorage/CHANGELOG.md @@ -0,0 +1,3399 @@ +# Changelog + +## [3.19.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.19.0...v3.19.1) (2026-01-15) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.65.1 ([4b9ce88](https://github.com/googleapis/java-bigquerystorage/commit/4b9ce887eb275403e0472f3561bc4abd2d2053b8)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.57.1 ([#3060](https://github.com/googleapis/java-bigquerystorage/issues/3060)) ([3a82884](https://github.com/googleapis/java-bigquerystorage/commit/3a828848626d62dde6f89d9ef4bc781bda2b4c5e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.55.1 ([#3173](https://github.com/googleapis/java-bigquerystorage/issues/3173)) ([fc161f2](https://github.com/googleapis/java-bigquerystorage/commit/fc161f230eef41c9458dcdc9ab95e9549e5134b8)) +* Update googleapis/sdk-platform-java action to v2.65.1 ([#3172](https://github.com/googleapis/java-bigquerystorage/issues/3172)) ([fdd8e4f](https://github.com/googleapis/java-bigquerystorage/commit/fdd8e4f2252dcca9aa31c6169a77f0c27f6ff554)) + + +### Documentation + +* Add samples for using timestamps with BQStorage Read and Write API 
([#3167](https://github.com/googleapis/java-bigquerystorage/issues/3167)) ([332736e](https://github.com/googleapis/java-bigquerystorage/commit/332736e8a2e9569163838d266ff49897486b9c3a)) + +## [3.19.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.18.0...v3.19.0) (2025-12-12) + + +### Features + +* Add picosecond timestamp support for Json to Proto converter ([#3131](https://github.com/googleapis/java-bigquerystorage/issues/3131)) ([ea1bcc5](https://github.com/googleapis/java-bigquerystorage/commit/ea1bcc509b7c430f92c5764cc4121aaa282255bf)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.2 ([#3137](https://github.com/googleapis/java-bigquerystorage/issues/3137)) ([2dc42c7](https://github.com/googleapis/java-bigquerystorage/commit/2dc42c7a6a15c4b86f7012a0cd211ca09ffd9a0e)) +* Update googleapis/sdk-platform-java action to v2.64.2 ([#3138](https://github.com/googleapis/java-bigquerystorage/issues/3138)) ([28cbdd9](https://github.com/googleapis/java-bigquerystorage/commit/28cbdd9f5ab36e9d7d0aa8142260dc760e50a3fb)) + +## [3.18.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.17.3...v3.18.0) (2025-11-13) + + +### Features + +* Support picosecond timestamp precision in BigQuery Storage API ([3704f63](https://github.com/googleapis/java-bigquerystorage/commit/3704f63150075eca0aacd100227125656aff39a6)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.64.1 ([3704f63](https://github.com/googleapis/java-bigquerystorage/commit/3704f63150075eca0aacd100227125656aff39a6)) +* Make location cache expire after 10 minutes ([#3117](https://github.com/googleapis/java-bigquerystorage/issues/3117)) ([f7f3c06](https://github.com/googleapis/java-bigquerystorage/commit/f7f3c063846863360503a14c401d62ee6e9b0df4)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.1 ([#3120](https://github.com/googleapis/java-bigquerystorage/issues/3120)) ([19ed2ec](https://github.com/googleapis/java-bigquerystorage/commit/19ed2eccdf07d3b5a211ae0781851ad838601edd)) + +## [3.17.3](https://github.com/googleapis/java-bigquerystorage/compare/v3.17.2...v3.17.3) (2025-10-20) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.53.0 ([#3112](https://github.com/googleapis/java-bigquerystorage/issues/3112)) ([345b153](https://github.com/googleapis/java-bigquerystorage/commit/345b153070eb2c91298fe5affa5d1d6b4be6f235)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.53.0 ([#3114](https://github.com/googleapis/java-bigquerystorage/issues/3114)) ([1ddfc63](https://github.com/googleapis/java-bigquerystorage/commit/1ddfc63cea149e2a5ff5087a6c780e252e0afa59)) + +## [3.17.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.17.1...v3.17.2) (2025-10-07) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.3 ([b9fad83](https://github.com/googleapis/java-bigquerystorage/commit/b9fad836efc0c2a1dac8955d93a2d73e3287de45)) +* Update apimaxrequestbytes to be the accurate 20mb ([#3099](https://github.com/googleapis/java-bigquerystorage/issues/3099)) ([00b465b](https://github.com/googleapis/java-bigquerystorage/commit/00b465b8f1423dd3826411cf1c496d5e60aa6025)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.3 ([#3103](https://github.com/googleapis/java-bigquerystorage/issues/3103)) 
([04d9c5e](https://github.com/googleapis/java-bigquerystorage/commit/04d9c5e66c936457bd9b5d54c01635ff8d97d6c0)) + +## [3.17.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.17.0...v3.17.1) (2025-09-25) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.2 ([25282fc](https://github.com/googleapis/java-bigquerystorage/commit/25282fc736278bbfb5b6086c16da8b37d3bdc139)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.2 ([#3092](https://github.com/googleapis/java-bigquerystorage/issues/3092)) ([b91cb97](https://github.com/googleapis/java-bigquerystorage/commit/b91cb973fc00e0cba2222bd94b2543730f02c42a)) + +## [3.17.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.16.3...v3.17.0) (2025-09-11) + + +### Features + +* **write:** Support Arrow format for Write API ([#3086](https://github.com/googleapis/java-bigquerystorage/issues/3086)) ([6b37a95](https://github.com/googleapis/java-bigquerystorage/commit/6b37a959b2d30dcc70052b29abc4607106f1bf28)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.1 ([#3088](https://github.com/googleapis/java-bigquerystorage/issues/3088)) ([4cb9fab](https://github.com/googleapis/java-bigquerystorage/commit/4cb9fab5c9c5ea716aab1813efcab319414d46c9)) +* Update googleapis/sdk-platform-java action to v2.62.1 ([#3089](https://github.com/googleapis/java-bigquerystorage/issues/3089)) ([08a1968](https://github.com/googleapis/java-bigquerystorage/commit/08a19685f519cd0a8ad43572377552e0b3727014)) + +## [3.16.3](https://github.com/googleapis/java-bigquerystorage/compare/v3.16.2...v3.16.3) (2025-08-26) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.0 ([#3076](https://github.com/googleapis/java-bigquerystorage/issues/3076)) ([a99d3f3](https://github.com/googleapis/java-bigquerystorage/commit/a99d3f32b7f732c276f5a57e317f3173044c7a8d)) + +## [3.16.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.16.1...v3.16.2) (2025-08-05) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.61.0 ([7ab0f12](https://github.com/googleapis/java-bigquerystorage/commit/7ab0f121ae2ba94e70875fd8505da1bea100a573)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.51.0 ([#3064](https://github.com/googleapis/java-bigquerystorage/issues/3064)) ([e33c4ae](https://github.com/googleapis/java-bigquerystorage/commit/e33c4aef51815ae6c8887301b2873710a28aa8c2)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.13.4 ([#3050](https://github.com/googleapis/java-bigquerystorage/issues/3050)) ([8a6b906](https://github.com/googleapis/java-bigquerystorage/commit/8a6b906970ea4dd05d1fbcb4557342cfa78ffb7c)) +* Update googleapis/sdk-platform-java action to v2.61.0 ([#3065](https://github.com/googleapis/java-bigquerystorage/issues/3065)) ([80acc0b](https://github.com/googleapis/java-bigquerystorage/commit/80acc0b85689716739d169df14edf52461d9da0d)) + +## [3.16.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.16.0...v3.16.1) (2025-07-28) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.60.2 ([d00dc03](https://github.com/googleapis/java-bigquerystorage/commit/d00dc03867b587be50ed6be12bc7eb7e7fd7cc7f)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.53.0 
([#3047](https://github.com/googleapis/java-bigquerystorage/issues/3047)) ([9ce3925](https://github.com/googleapis/java-bigquerystorage/commit/9ce39254cc26d0dd6bcf019d259a124461cacfc2)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.2 ([#3051](https://github.com/googleapis/java-bigquerystorage/issues/3051)) ([6ecd933](https://github.com/googleapis/java-bigquerystorage/commit/6ecd9338bac7382ad58cfafe25d7268a6d9654eb)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.52.0 ([#3044](https://github.com/googleapis/java-bigquerystorage/issues/3044)) ([f922f1f](https://github.com/googleapis/java-bigquerystorage/commit/f922f1fdd78442ee4111a79da20ebb0ba891ccc3)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.52.0 ([#3045](https://github.com/googleapis/java-bigquerystorage/issues/3045)) ([c59ec44](https://github.com/googleapis/java-bigquerystorage/commit/c59ec448729ddee124755015d652143860fd1179)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.11.0 ([#3049](https://github.com/googleapis/java-bigquerystorage/issues/3049)) ([84ae4f8](https://github.com/googleapis/java-bigquerystorage/commit/84ae4f8cc6b506cc2a804227dbe424c9f85fbc75)) + +## [3.16.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.15.3...v3.16.0) (2025-07-11) + + +### Features + +* Lower maxInflightRequest setting so that connection pool can scale up more efficiently. ([#2993](https://github.com/googleapis/java-bigquerystorage/issues/2993)) ([38e5837](https://github.com/googleapis/java-bigquerystorage/commit/38e5837dda2dfd2c2b22bedae6db089892f129f1)) +* Next release from main branch is 3.16.0 ([#3037](https://github.com/googleapis/java-bigquerystorage/issues/3037)) ([314d641](https://github.com/googleapis/java-bigquerystorage/commit/314d6419d29c175106f7d935be8c6ccf442466a5)) + + +### Bug Fixes + +* Add UNKNOWN to retry code, the same set was used for request level retries. 
([#3034](https://github.com/googleapis/java-bigquerystorage/issues/3034)) ([60fb0c7](https://github.com/googleapis/java-bigquerystorage/commit/60fb0c743a1546d8ca2d6ea4c79e88d33ae09139)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.52.0 ([#3028](https://github.com/googleapis/java-bigquerystorage/issues/3028)) ([6501ab5](https://github.com/googleapis/java-bigquerystorage/commit/6501ab5b204a9d13dce709fd832173645596f5d9)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.1 ([#3035](https://github.com/googleapis/java-bigquerystorage/issues/3035)) ([40d4e06](https://github.com/googleapis/java-bigquerystorage/commit/40d4e061f45ac3a08806828a4e47a2b48843f551)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.13.3 ([#3032](https://github.com/googleapis/java-bigquerystorage/issues/3032)) ([951f769](https://github.com/googleapis/java-bigquerystorage/commit/951f7697e28f8bfbe27130da29904c888266f30a)) +* Update googleapis/sdk-platform-java action to v2.60.1 ([#3036](https://github.com/googleapis/java-bigquerystorage/issues/3036)) ([c2941a5](https://github.com/googleapis/java-bigquerystorage/commit/c2941a5d6f99515d5ee49ebcfd00a855f22dce1c)) +* Update googleapis/sdk-platform-java action to v2.60.1 ([#3040](https://github.com/googleapis/java-bigquerystorage/issues/3040)) ([11412b4](https://github.com/googleapis/java-bigquerystorage/commit/11412b4b99510f5c52f26a3b83d7ce5cab7ae032)) + +## [3.15.3](https://github.com/googleapis/java-bigquerystorage/compare/v3.15.2...v3.15.3) (2025-06-25) + + +### Bug Fixes + +* StreamWriter message typo ([#3006](https://github.com/googleapis/java-bigquerystorage/issues/3006)) ([8e95be5](https://github.com/googleapis/java-bigquerystorage/commit/8e95be59d374785447fbb4c871d2ce61661ec25d)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.51.0 ([#3004](https://github.com/googleapis/java-bigquerystorage/issues/3004)) ([57d9df0](https://github.com/googleapis/java-bigquerystorage/commit/57d9df0eb1cfcd0a88197fbe474102ac8c928212)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.49.1 ([#3011](https://github.com/googleapis/java-bigquerystorage/issues/3011)) ([1521f28](https://github.com/googleapis/java-bigquerystorage/commit/1521f287f2ad5565ed18e20e0540f2521e1f7dc9)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.49.2 ([#3019](https://github.com/googleapis/java-bigquerystorage/issues/3019)) ([03685d9](https://github.com/googleapis/java-bigquerystorage/commit/03685d9c549e4206e1c3fedcfad9539e6e981bbe)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.0 ([#3023](https://github.com/googleapis/java-bigquerystorage/issues/3023)) ([820cec1](https://github.com/googleapis/java-bigquerystorage/commit/820cec13b6cd88660278c015876eeb2c0a776c50)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.36.0 ([#3024](https://github.com/googleapis/java-bigquerystorage/issues/3024)) ([d230ead](https://github.com/googleapis/java-bigquerystorage/commit/d230ead35b66e6dbdb6e4042d804145160b1ab71)) +* Update dependency com.google.http-client:google-http-client to v1.47.1 ([#3020](https://github.com/googleapis/java-bigquerystorage/issues/3020)) ([ea87aa3](https://github.com/googleapis/java-bigquerystorage/commit/ea87aa3d4616d701bd8965d6f89481182f2f6da6)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.13.2 ([#3022](https://github.com/googleapis/java-bigquerystorage/issues/3022)) 
([9b57395](https://github.com/googleapis/java-bigquerystorage/commit/9b57395c1e1a922ca11458321ab51decc7e74026)) +* Update googleapis/sdk-platform-java action to v2.59.1 ([#3008](https://github.com/googleapis/java-bigquerystorage/issues/3008)) ([1a5dd9c](https://github.com/googleapis/java-bigquerystorage/commit/1a5dd9c616d0f5574e442444e5402f1b80e9b76e)) +* Update googleapis/sdk-platform-java action to v2.59.2 ([#3018](https://github.com/googleapis/java-bigquerystorage/issues/3018)) ([5caccc4](https://github.com/googleapis/java-bigquerystorage/commit/5caccc490e4b800023f567dc9274ea26c870fcda)) +* Update googleapis/sdk-platform-java action to v2.60.0 ([#3021](https://github.com/googleapis/java-bigquerystorage/issues/3021)) ([1ef5170](https://github.com/googleapis/java-bigquerystorage/commit/1ef51703f8a9e0317e8c201bb2d0baf83820df00)) + +## [3.15.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.15.1...v3.15.2) (2025-06-17) + + +### Dependencies + +* Remove version declaration of gson and open-telemetry-bom ([#3012](https://github.com/googleapis/java-bigquerystorage/issues/3012)) ([c57ef03](https://github.com/googleapis/java-bigquerystorage/commit/c57ef0385c1415506d15bc1b6b2e787725d3cefb)) + +## [3.15.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.15.0...v3.15.1) (2025-06-16) + + +### Dependencies + +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.35.0 ([#2988](https://github.com/googleapis/java-bigquerystorage/issues/2988)) ([c16eefd](https://github.com/googleapis/java-bigquerystorage/commit/c16eefd3d4225b3f7eaff32134658a87be2f63a2)) +* Update dependency com.google.code.gson:gson to v2.13.1 ([#2990](https://github.com/googleapis/java-bigquerystorage/issues/2990)) ([c7ea5a8](https://github.com/googleapis/java-bigquerystorage/commit/c7ea5a898f4803850d2a7d23c5e20949b2f98967)) +* Update dependency com.google.protobuf:protobuf-java-util to v3.25.8 ([#2987](https://github.com/googleapis/java-bigquerystorage/issues/2987)) ([7e8829e](https://github.com/googleapis/java-bigquerystorage/commit/7e8829e277e6cfa49d5f01f956afb42ba4c74350)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.51.0 ([#2992](https://github.com/googleapis/java-bigquerystorage/issues/2992)) ([d746ad8](https://github.com/googleapis/java-bigquerystorage/commit/d746ad89e8273d71003b9ba7add73e2ae3d87ced)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.51.0 ([#3005](https://github.com/googleapis/java-bigquerystorage/issues/3005)) ([896956c](https://github.com/googleapis/java-bigquerystorage/commit/896956c7a18099eb0ea2fb4f3f22c688e31bf84c)) +* Update dependency org.json:json to v20250517 ([#2980](https://github.com/googleapis/java-bigquerystorage/issues/2980)) ([57b944e](https://github.com/googleapis/java-bigquerystorage/commit/57b944e165ae80eb75a7634190a62e4030ab93d8)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.13.1 ([#2998](https://github.com/googleapis/java-bigquerystorage/issues/2998)) ([9cf104b](https://github.com/googleapis/java-bigquerystorage/commit/9cf104b53b6752488d82170e57fbb46a720a25cc)) + +## [3.15.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.14.1...v3.15.0) (2025-06-04) + + +### Features + +* **bigquery:** Integrate Otel Tracing in storage lib ([#2983](https://github.com/googleapis/java-bigquerystorage/issues/2983)) ([c84fad6](https://github.com/googleapis/java-bigquerystorage/commit/c84fad667f6b0dea7135b87ceb15e60cad8dff3d)) +* Provide append() methods that accept com.google.gson 
objects ([#2985](https://github.com/googleapis/java-bigquerystorage/issues/2985)) ([b8dff57](https://github.com/googleapis/java-bigquerystorage/commit/b8dff570a27b77bdfa0cceccd207e0fbf1d16d65)) + + +### Bug Fixes + +* **bigquery:** Allow users to supply custom TracerProvider ([#2995](https://github.com/googleapis/java-bigquerystorage/issues/2995)) ([88095e6](https://github.com/googleapis/java-bigquerystorage/commit/88095e60df7ef8686b35c679501fbd3939f994d9)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.58.0 ([45d50bb](https://github.com/googleapis/java-bigquerystorage/commit/45d50bb0f7a6fc2ac226860c5b1f5ac2d27bb3b4)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.59.0 ([104571c](https://github.com/googleapis/java-bigquerystorage/commit/104571c453de7153dc05522fbb69b3be1b612e1e)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.50.1 ([#2981](https://github.com/googleapis/java-bigquerystorage/issues/2981)) ([3869181](https://github.com/googleapis/java-bigquerystorage/commit/38691813495485e81b2d7c3d4c7d108d5bfe57dc)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.49.0 ([#2989](https://github.com/googleapis/java-bigquerystorage/issues/2989)) ([40a1b95](https://github.com/googleapis/java-bigquerystorage/commit/40a1b9541c1967e9a8f1f647a48fd8484e0783ae)) + +## [3.14.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.14.0...v3.14.1) (2025-05-15) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.57.0 ([745bedb](https://github.com/googleapis/java-bigquerystorage/commit/745bedbd7f907361f89daa165ef9b361188c61a1)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.50.0 ([#2969](https://github.com/googleapis/java-bigquerystorage/issues/2969)) ([7a85ae2](https://github.com/googleapis/java-bigquerystorage/commit/7a85ae2c616f6c851f9d45b663be5ce8ba064a4e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.48.0 ([#2975](https://github.com/googleapis/java-bigquerystorage/issues/2975)) ([0f94876](https://github.com/googleapis/java-bigquerystorage/commit/0f94876d274968bb27c748f9a58062cfb4499e3b)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.50.0 ([#2971](https://github.com/googleapis/java-bigquerystorage/issues/2971)) ([adafd79](https://github.com/googleapis/java-bigquerystorage/commit/adafd799d43b5fdea8c527432930069fe01f08f6)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.50.0 ([#2972](https://github.com/googleapis/java-bigquerystorage/issues/2972)) ([a09a457](https://github.com/googleapis/java-bigquerystorage/commit/a09a457a4522da71c4abf255e3e1deb23364bc73)) + +## [3.14.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.13.1...v3.14.0) (2025-05-06) + + +### Features + +* Generate BigQueryStorage v1beta library ([#2962](https://github.com/googleapis/java-bigquerystorage/issues/2962)) ([c31f551](https://github.com/googleapis/java-bigquerystorage/commit/c31f551db95643804bcfad8dbc591f1f03a12854)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.47.0 ([#2966](https://github.com/googleapis/java-bigquerystorage/issues/2966)) ([01e5c04](https://github.com/googleapis/java-bigquerystorage/commit/01e5c0499137231331bd5fc4364ec90679de6529)) +* Update googleapis/sdk-platform-java action to v2.57.0 ([#2967](https://github.com/googleapis/java-bigquerystorage/issues/2967)) 
([9e8ddae](https://github.com/googleapis/java-bigquerystorage/commit/9e8ddae6fc6ba0ecdc9299c796cde671f856c2dc)) + +## [3.13.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.13.0...v3.13.1) (2025-05-05) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.56.3 ([5a62a1a](https://github.com/googleapis/java-bigquerystorage/commit/5a62a1a35b741426d4208980f39bc3f082f9091d)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.49.2 ([#2954](https://github.com/googleapis/java-bigquerystorage/issues/2954)) ([9cfe7d8](https://github.com/googleapis/java-bigquerystorage/commit/9cfe7d81d6a98905335fb892c84f3031d6fea008)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.3 ([#2958](https://github.com/googleapis/java-bigquerystorage/issues/2958)) ([f4b1d93](https://github.com/googleapis/java-bigquerystorage/commit/f4b1d932ec9addae2241da93ba4dcf3d2199d64a)) +* Update dependency com.google.http-client:google-http-client to v1.47.0 ([#2955](https://github.com/googleapis/java-bigquerystorage/issues/2955)) ([02d5fa2](https://github.com/googleapis/java-bigquerystorage/commit/02d5fa23d8bc2ae1908ef527940bd0236d2a318b)) +* Update dependency com.google.protobuf:protobuf-java-util to v3.25.7 ([#2949](https://github.com/googleapis/java-bigquerystorage/issues/2949)) ([0a6f744](https://github.com/googleapis/java-bigquerystorage/commit/0a6f744ebe08d6cd038f18f9cdbeec1c714b6005)) +* Update dependency com.google.protobuf:protobuf-java-util to v3.25.7 ([#2949](https://github.com/googleapis/java-bigquerystorage/issues/2949)) ([09db7cd](https://github.com/googleapis/java-bigquerystorage/commit/09db7cdc1268f7ff7292cd70df65b42cede11fde)) + +## [3.13.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.12.0...v3.13.0) (2025-04-25) + + +### Features + +* Increased the number of partitions that can be written in a single request ([f5c01da](https://github.com/googleapis/java-bigquerystorage/commit/f5c01da9c8a96122130cfc5a240dd52fc00441cd)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.56.2 ([4c8edd5](https://github.com/googleapis/java-bigquerystorage/commit/4c8edd5457b782a81554ff4715f834770e126580)) +* Don't start a retry timer if the connection was closed due to being idle ([#2942](https://github.com/googleapis/java-bigquerystorage/issues/2942)) ([e17ada2](https://github.com/googleapis/java-bigquerystorage/commit/e17ada2399347e9427e6ec0c9c0259e667c7f389)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.49.0 ([#2922](https://github.com/googleapis/java-bigquerystorage/issues/2922)) ([d2b0bef](https://github.com/googleapis/java-bigquerystorage/commit/d2b0bef9fcccfa6b5ff71868e69e0db82d20faa0)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.49.1 ([#2948](https://github.com/googleapis/java-bigquerystorage/issues/2948)) ([90856b2](https://github.com/googleapis/java-bigquerystorage/commit/90856b234e01cdc6dac6def1095b1255f933aa1c)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.2 ([#2946](https://github.com/googleapis/java-bigquerystorage/issues/2946)) ([b828283](https://github.com/googleapis/java-bigquerystorage/commit/b82828386eb42254aeb22027adf3e049a6684db8)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.49.0 ([#2935](https://github.com/googleapis/java-bigquerystorage/issues/2935)) 
([85e6f0e](https://github.com/googleapis/java-bigquerystorage/commit/85e6f0e17ec57f2c791f3397817fe418252a87b7)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.49.0 ([#2936](https://github.com/googleapis/java-bigquerystorage/issues/2936)) ([3049ccc](https://github.com/googleapis/java-bigquerystorage/commit/3049ccca040ffd3ecfcb3ee6634a2888b39b3739)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.12.2 ([#2939](https://github.com/googleapis/java-bigquerystorage/issues/2939)) ([b682c72](https://github.com/googleapis/java-bigquerystorage/commit/b682c728226254e652824652fedc9aec63e1bf36)) + +## [3.12.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.11.4...v3.12.0) (2025-03-19) + + +### Features + +* Next release from main branch is 3.12.0 ([#2890](https://github.com/googleapis/java-bigquerystorage/issues/2890)) ([ae6e540](https://github.com/googleapis/java-bigquerystorage/commit/ae6e54046f5922bd888be46106be19e1ee97c7e7)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.54.0 ([5fcaa9f](https://github.com/googleapis/java-bigquerystorage/commit/5fcaa9fbc0a99a63cf12fee7ce598a601c3e506b)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.55.0 ([0ca4dba](https://github.com/googleapis/java-bigquerystorage/commit/0ca4dba010813f2d2565190253b3d97b4e61602d)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.55.1 ([29d0546](https://github.com/googleapis/java-bigquerystorage/commit/29d054639cd5a1e734d833ede2eb1d5b994807c1)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.48.1 ([#2886](https://github.com/googleapis/java-bigquerystorage/issues/2886)) ([eee3e60](https://github.com/googleapis/java-bigquerystorage/commit/eee3e6027cccef48f3b0e6ae2c342472216b545b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.45.1 ([#2903](https://github.com/googleapis/java-bigquerystorage/issues/2903)) ([21a39c7](https://github.com/googleapis/java-bigquerystorage/commit/21a39c7e33a438565e7a82f59061a75ab538249b)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.34.0 ([#2912](https://github.com/googleapis/java-bigquerystorage/issues/2912)) ([c99a713](https://github.com/googleapis/java-bigquerystorage/commit/c99a71355a5dcb2e4d3d33c4e51282889bb4de13)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.48.0 ([#2897](https://github.com/googleapis/java-bigquerystorage/issues/2897)) ([011e177](https://github.com/googleapis/java-bigquerystorage/commit/011e177c8ce560e902620d8d0a5712c4f092255e)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.48.0 ([#2899](https://github.com/googleapis/java-bigquerystorage/issues/2899)) ([e809ea0](https://github.com/googleapis/java-bigquerystorage/commit/e809ea0b7629c40d1e9d16e5104dd8dc789f2ee9)) +* Update dependency node to v22 ([#2901](https://github.com/googleapis/java-bigquerystorage/issues/2901)) ([1cb2299](https://github.com/googleapis/java-bigquerystorage/commit/1cb229997c3816f135106465c3cf9705bff284e0)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.6 ([#2905](https://github.com/googleapis/java-bigquerystorage/issues/2905)) ([6e3fc75](https://github.com/googleapis/java-bigquerystorage/commit/6e3fc757b2b9586e86ca99ccdb7c2d34af49b737)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.12.1 ([#2909](https://github.com/googleapis/java-bigquerystorage/issues/2909)) 
([fd56488](https://github.com/googleapis/java-bigquerystorage/commit/fd564889ba1425612acc6ce0828d7623c261edb6)) +* Update googleapis/sdk-platform-java action to v2.55.0 ([#2904](https://github.com/googleapis/java-bigquerystorage/issues/2904)) ([1a09935](https://github.com/googleapis/java-bigquerystorage/commit/1a09935b1ca099d2f1129c128a0d970a6fb71ae4)) + +## [3.11.4](https://github.com/googleapis/java-bigquerystorage/compare/v3.11.3...v3.11.4) (2025-02-26) + + +### Bug Fixes + +* Clarify connection pool document ([#2877](https://github.com/googleapis/java-bigquerystorage/issues/2877)) ([4a4adbb](https://github.com/googleapis/java-bigquerystorage/commit/4a4adbbe501fe772155f25e9606194a8ed15e044)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.48.0 ([#2865](https://github.com/googleapis/java-bigquerystorage/issues/2865)) ([e934eb6](https://github.com/googleapis/java-bigquerystorage/commit/e934eb680b4a919626e0ca8de2fc97837ccded9b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.44.0 ([#2879](https://github.com/googleapis/java-bigquerystorage/issues/2879)) ([de93f74](https://github.com/googleapis/java-bigquerystorage/commit/de93f74b737d3ca752ec213a9ea17d994c4419df)) +* Update dependency com.google.http-client:google-http-client to v1.46.2 ([#2874](https://github.com/googleapis/java-bigquerystorage/issues/2874)) ([4ba9010](https://github.com/googleapis/java-bigquerystorage/commit/4ba90103ad8e8a631603942b2fce6741634dcbfd)) +* Update dependency com.google.http-client:google-http-client to v1.46.3 ([#2876](https://github.com/googleapis/java-bigquerystorage/issues/2876)) ([1f5dc49](https://github.com/googleapis/java-bigquerystorage/commit/1f5dc493c49923b2a2f4c3e57b4fe3cb19c81f20)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.12.0 ([#2871](https://github.com/googleapis/java-bigquerystorage/issues/2871)) ([165502a](https://github.com/googleapis/java-bigquerystorage/commit/165502aca0a1fbdef90dffb500ee2500bfc65ae8)) +* Update googleapis/sdk-platform-java action to v2.54.0 ([#2878](https://github.com/googleapis/java-bigquerystorage/issues/2878)) ([5593950](https://github.com/googleapis/java-bigquerystorage/commit/5593950a16618aaa37606cc3d64efdb4da743726)) + +## [3.11.3](https://github.com/googleapis/java-bigquerystorage/compare/v3.11.2...v3.11.3) (2025-02-12) + + +### Bug Fixes + +* Allow trace id to switch within the same connection ([#2852](https://github.com/googleapis/java-bigquerystorage/issues/2852)) ([850eaea](https://github.com/googleapis/java-bigquerystorage/commit/850eaea9eaba2f6a43055fa8319d7342901ea21d)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.53.0 ([c757440](https://github.com/googleapis/java-bigquerystorage/commit/c75744088a32598644d4924b7d2dfdcef37ff0d1)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.47.0 ([#2845](https://github.com/googleapis/java-bigquerystorage/issues/2845)) ([b089788](https://github.com/googleapis/java-bigquerystorage/commit/b08978808987dbb4b317d492e2306d24b6427669)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.43.0 ([#2859](https://github.com/googleapis/java-bigquerystorage/issues/2859)) ([ceb926a](https://github.com/googleapis/java-bigquerystorage/commit/ceb926ad9515dfc15ed5f6c7ea15506351d7d37d)) +* Update dependency com.google.http-client:google-http-client to v1.46.1 ([#2854](https://github.com/googleapis/java-bigquerystorage/issues/2854)) 
([037ca78](https://github.com/googleapis/java-bigquerystorage/commit/037ca78f20d1ae0d86c1acaf2a48d75418207501)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.47.0 ([#2856](https://github.com/googleapis/java-bigquerystorage/issues/2856)) ([14d12ba](https://github.com/googleapis/java-bigquerystorage/commit/14d12ba859f5af961c5b382bba47e118bcb69d3b)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.47.0 ([#2857](https://github.com/googleapis/java-bigquerystorage/issues/2857)) ([1839051](https://github.com/googleapis/java-bigquerystorage/commit/183905176cbedfc9e1bfcf744907fd624d99c8a5)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.5 ([#2851](https://github.com/googleapis/java-bigquerystorage/issues/2851)) ([2ff6112](https://github.com/googleapis/java-bigquerystorage/commit/2ff6112a7f7d7a938d2267e60a4fdfe1fb5baaea)) +* Update googleapis/sdk-platform-java action to v2.53.0 ([#2858](https://github.com/googleapis/java-bigquerystorage/issues/2858)) ([2412f62](https://github.com/googleapis/java-bigquerystorage/commit/2412f629bd7a305d8b19685ef5b8067702187aec)) + +## [3.11.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.11.1...v3.11.2) (2025-01-28) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.52.0 ([71d6c1a](https://github.com/googleapis/java-bigquerystorage/commit/71d6c1abf8633a24470e08e3b3ad294d545e6164)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.46.0 ([#2826](https://github.com/googleapis/java-bigquerystorage/issues/2826)) ([9bc1596](https://github.com/googleapis/java-bigquerystorage/commit/9bc15969d480a36f9bbbc9cab5ce7aa1410ad3bc)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.42.0 ([#2840](https://github.com/googleapis/java-bigquerystorage/issues/2840)) ([b270ff0](https://github.com/googleapis/java-bigquerystorage/commit/b270ff056b0065bcc57c3b8fd6bacf71fb4cfe77)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.46.0 ([#2825](https://github.com/googleapis/java-bigquerystorage/issues/2825)) ([4806d11](https://github.com/googleapis/java-bigquerystorage/commit/4806d11498e4c57f5a694bc05a9f1df3dcba841f)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.46.0 ([#2822](https://github.com/googleapis/java-bigquerystorage/issues/2822)) ([4b59e8f](https://github.com/googleapis/java-bigquerystorage/commit/4b59e8f3f43b08b6d7c670773d540cd1ce827d39)) +* Update googleapis/sdk-platform-java action to v2.52.0 ([#2838](https://github.com/googleapis/java-bigquerystorage/issues/2838)) ([0058a0b](https://github.com/googleapis/java-bigquerystorage/commit/0058a0b3339d78cd2d9824bf24434e442753c97f)) + +## [3.11.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.11.0...v3.11.1) (2025-01-10) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.51.1 ([693c294](https://github.com/googleapis/java-bigquerystorage/commit/693c29419be0c0daad404500ba764f62de9ad56a)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.45.0 ([#2812](https://github.com/googleapis/java-bigquerystorage/issues/2812)) ([5c8ebc0](https://github.com/googleapis/java-bigquerystorage/commit/5c8ebc0e01171495c37d14d451b5fe63b9b9371b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.41.1 ([#2819](https://github.com/googleapis/java-bigquerystorage/issues/2819)) 
([059cd9b](https://github.com/googleapis/java-bigquerystorage/commit/059cd9bf77365603f45d01c71275e51b654ff381)) +* Update dependency org.json:json to v20241224 ([#2815](https://github.com/googleapis/java-bigquerystorage/issues/2815)) ([d93cf86](https://github.com/googleapis/java-bigquerystorage/commit/d93cf8628dcb2e632a317330d43f22c3b360f36e)) +* Update dependency org.json:json to v20250107 ([#2818](https://github.com/googleapis/java-bigquerystorage/issues/2818)) ([7cfebde](https://github.com/googleapis/java-bigquerystorage/commit/7cfebdeec471eae027204743abec57a6fadab06f)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.4 ([#2805](https://github.com/googleapis/java-bigquerystorage/issues/2805)) ([ab94a44](https://github.com/googleapis/java-bigquerystorage/commit/ab94a44a2fd75f1b1b8f78a642ea51e37acee570)) +* Update googleapis/sdk-platform-java action to v2.51.1 ([#2817](https://github.com/googleapis/java-bigquerystorage/issues/2817)) ([2d290ac](https://github.com/googleapis/java-bigquerystorage/commit/2d290ac07f0ad9e0c2af3556e2efeeb3305f2bdf)) + +## [3.11.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.10.3...v3.11.0) (2024-12-12) + + +### Features + +* Add support for byte and short ([#2789](https://github.com/googleapis/java-bigquerystorage/issues/2789)) ([98a714f](https://github.com/googleapis/java-bigquerystorage/commit/98a714fbdd409632c000e01c2ada9565e483eecc)) +* Introduce `java.time` methods and variables ([#2780](https://github.com/googleapis/java-bigquerystorage/issues/2780)) ([8dd66d5](https://github.com/googleapis/java-bigquerystorage/commit/8dd66d56ae9a0cb25171e466dcd8dc1e78660872)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.51.0 ([4936ac9](https://github.com/googleapis/java-bigquerystorage/commit/4936ac952e76c08239c0aad28cc38c3212591dcd)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.44.0 ([#2778](https://github.com/googleapis/java-bigquerystorage/issues/2778)) ([91be63a](https://github.com/googleapis/java-bigquerystorage/commit/91be63a273441ee50610545c4acce9ae2b86639c)) +* Update dependency com.google.http-client:google-http-client to v1.45.3 ([#2790](https://github.com/googleapis/java-bigquerystorage/issues/2790)) ([cd0e62e](https://github.com/googleapis/java-bigquerystorage/commit/cd0e62e8fa4de42cc2fc4e9e38650eea69fc7035)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.45.0 ([#2792](https://github.com/googleapis/java-bigquerystorage/issues/2792)) ([e6cfc49](https://github.com/googleapis/java-bigquerystorage/commit/e6cfc49039c2d9f1bd49ec4d423c9cb0192680c5)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.45.0 ([#2793](https://github.com/googleapis/java-bigquerystorage/issues/2793)) ([eea1d82](https://github.com/googleapis/java-bigquerystorage/commit/eea1d82aaae379c98f35b6da067cb84e28522655)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.4 ([#2787](https://github.com/googleapis/java-bigquerystorage/issues/2787)) ([b85f01d](https://github.com/googleapis/java-bigquerystorage/commit/b85f01d809aa3e9e84c81ab4af8cf8c0eba98c0b)) +* Update sdk-platform-java dependencies ([#2798](https://github.com/googleapis/java-bigquerystorage/issues/2798)) ([1f2eaa0](https://github.com/googleapis/java-bigquerystorage/commit/1f2eaa08d49c1bca88164bcc149cfd75a7146686)) + +## [3.10.3](https://github.com/googleapis/java-bigquerystorage/compare/v3.10.2...v3.10.3) (2024-11-17) + + +### Bug Fixes + +* 
**deps:** Update the Java code generator (gapic-generator-java) to 2.50.0 ([1b7a04c](https://github.com/googleapis/java-bigquerystorage/commit/1b7a04cff6b93cffa26b2e8124835d7b3d1629ab)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.43.3 ([#2749](https://github.com/googleapis/java-bigquerystorage/issues/2749)) ([9e844df](https://github.com/googleapis/java-bigquerystorage/commit/9e844df78e99520343ac6519b75022c272aef760)) +* Update dependency com.google.http-client:google-http-client to v1.45.1 ([#2769](https://github.com/googleapis/java-bigquerystorage/issues/2769)) ([b0532b2](https://github.com/googleapis/java-bigquerystorage/commit/b0532b2ae51924815dfb9d40422c6f228fb2afc4)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.44.1 ([#2762](https://github.com/googleapis/java-bigquerystorage/issues/2762)) ([31908f6](https://github.com/googleapis/java-bigquerystorage/commit/31908f67e392a2a40b1098f30713893b6492558b)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.44.1 ([#2763](https://github.com/googleapis/java-bigquerystorage/issues/2763)) ([28239ba](https://github.com/googleapis/java-bigquerystorage/commit/28239ba88cf7f3edbe543c387ec2bf5645aaf7ab)) +* Update sdk-platform-java dependencies ([#2771](https://github.com/googleapis/java-bigquerystorage/issues/2771)) ([73effc3](https://github.com/googleapis/java-bigquerystorage/commit/73effc344ea394e8d11fecaa1a8f068d59aaec26)) + +## [3.10.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.10.1...v3.10.2) (2024-10-28) + + +### Bug Fixes + +* BQTableSchemaToProtoDescriptor setting of FieldOption. ([#2743](https://github.com/googleapis/java-bigquerystorage/issues/2743)) ([fb57909](https://github.com/googleapis/java-bigquerystorage/commit/fb57909e75c31a1c3f924b8b41c8a9d5f7c73207)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.49.0 ([882a657](https://github.com/googleapis/java-bigquerystorage/commit/882a6576ebab12d5bdd95261d231f49bc24f52ac)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.43.2 ([#2744](https://github.com/googleapis/java-bigquerystorage/issues/2744)) ([1b12580](https://github.com/googleapis/java-bigquerystorage/commit/1b12580ff7de454876870cc08e78b81f3b7b572c)) + +## [3.10.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.10.0...v3.10.1) (2024-10-26) + + +### Bug Fixes + +* Update BigQuerySchemaUtil to use non-deprecated hasExtension ([#2732](https://github.com/googleapis/java-bigquerystorage/issues/2732)) ([b97b234](https://github.com/googleapis/java-bigquerystorage/commit/b97b23423408b1c96941a507cf27ddce84c1be38)) + + +### Dependencies + +* Update actions/checkout digest to 11bd719 ([#2726](https://github.com/googleapis/java-bigquerystorage/issues/2726)) ([01a3a23](https://github.com/googleapis/java-bigquerystorage/commit/01a3a239c2cf3ee0146bea64ffa4ff93515b7ae7)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.3 ([#2721](https://github.com/googleapis/java-bigquerystorage/issues/2721)) ([0bc725c](https://github.com/googleapis/java-bigquerystorage/commit/0bc725cce59582706f2abb41804ee58796afb637)) +* Update googleapis/sdk-platform-java action to v2.49.0 ([#2737](https://github.com/googleapis/java-bigquerystorage/issues/2737)) ([46e302f](https://github.com/googleapis/java-bigquerystorage/commit/46e302fcb2345370c5f7fd91e0c9d550c8c05f23)) +* Update sdk-platform-java dependencies 
([#2738](https://github.com/googleapis/java-bigquerystorage/issues/2738)) ([9556cfe](https://github.com/googleapis/java-bigquerystorage/commit/9556cfedaf321b49ee21083a376540ea57550519)) + +## [3.10.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.9.3...v3.10.0) (2024-10-23) + + +### Features + +* Add experimental ArrowData type and arrow_data field within AppendRowsRequest ([18faebd](https://github.com/googleapis/java-bigquerystorage/commit/18faebd3eccf317a67306ebaebc987367fca3627)) + + +### Bug Fixes + +* Add stream name to every request when connection is created during multiplexing ([#2699](https://github.com/googleapis/java-bigquerystorage/issues/2699)) ([c53a77c](https://github.com/googleapis/java-bigquerystorage/commit/c53a77c6e0d2d1a639033db98bacccedb3a226f7)) + + +### Dependencies + +* Update actions/checkout digest to eef6144 ([#2687](https://github.com/googleapis/java-bigquerystorage/issues/2687)) ([f3c5a7a](https://github.com/googleapis/java-bigquerystorage/commit/f3c5a7af4316ca5b3656aaa7c65bce12c5932503)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.43.1 ([#2701](https://github.com/googleapis/java-bigquerystorage/issues/2701)) ([6ebed6d](https://github.com/googleapis/java-bigquerystorage/commit/6ebed6d7b450879ff61e4c3222c24709e2809cf9)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.33.0 ([#2696](https://github.com/googleapis/java-bigquerystorage/issues/2696)) ([19398db](https://github.com/googleapis/java-bigquerystorage/commit/19398db70f07ed7b2fe52e7aed221e481bee089d)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.43.0 ([#2711](https://github.com/googleapis/java-bigquerystorage/issues/2711)) ([46e3cb0](https://github.com/googleapis/java-bigquerystorage/commit/46e3cb022142bdae8223028549b6d08050895f3c)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.43.0 ([#2712](https://github.com/googleapis/java-bigquerystorage/issues/2712)) ([b466e28](https://github.com/googleapis/java-bigquerystorage/commit/b466e287caa61a92fe6d70a9c115c0b596f436b4)) +* Update dependency org.apache.avro:avro to v1.11.4 [security] ([#2694](https://github.com/googleapis/java-bigquerystorage/issues/2694)) ([b0498ba](https://github.com/googleapis/java-bigquerystorage/commit/b0498ba15ef87a703801f4c56867ab744f7c3902)) +* Update sdk-platform-java dependencies ([#2727](https://github.com/googleapis/java-bigquerystorage/issues/2727)) ([dfa7555](https://github.com/googleapis/java-bigquerystorage/commit/dfa75555a52506804debb06b442a6d13dcffc6f2)) + +## [3.9.3](https://github.com/googleapis/java-bigquerystorage/compare/v3.9.2...v3.9.3) (2024-10-07) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.46.1 ([c0f14d8](https://github.com/googleapis/java-bigquerystorage/commit/c0f14d85f5ac4ccb44563449431ca05ccb5591e9)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.47.0 ([3183017](https://github.com/googleapis/java-bigquerystorage/commit/3183017127ee8ff9219eccb8d85f3d1398ab34ed)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.43.0 ([#2677](https://github.com/googleapis/java-bigquerystorage/issues/2677)) ([e03db45](https://github.com/googleapis/java-bigquerystorage/commit/e03db4515dbee57ae9a60fdc851d97a3991ac906)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.37.0 ([#2684](https://github.com/googleapis/java-bigquerystorage/issues/2684)) 
([23684e5](https://github.com/googleapis/java-bigquerystorage/commit/23684e50c198eccb696f0be387ab9d24a58dfd92)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.2 ([#2682](https://github.com/googleapis/java-bigquerystorage/issues/2682)) ([8cc01e1](https://github.com/googleapis/java-bigquerystorage/commit/8cc01e13dbbb224b2dd64d4fd226755a4884363e)) +* Update googleapis/sdk-platform-java action to v2.47.0 ([#2685](https://github.com/googleapis/java-bigquerystorage/issues/2685)) ([c7c9ccd](https://github.com/googleapis/java-bigquerystorage/commit/c7c9ccdba11effff15a24e4fe985132142308b98)) + +## [3.9.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.9.1...v3.9.2) (2024-09-26) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.46.0 ([1805476](https://github.com/googleapis/java-bigquerystorage/commit/18054767ca551410269f65ca6ccfa6247af0b0e3)) +* Increase method timeout to 240s for BigQuery Metastore Partition Service API version v1alpha ([26e6d33](https://github.com/googleapis/java-bigquerystorage/commit/26e6d33dbce75ac8dfd2f8b5930aa347b1fffe27)) +* Persist missingValueInterpretationMap in StreamWriter's Builder ([#2587](https://github.com/googleapis/java-bigquerystorage/issues/2587)) ([036d2e6](https://github.com/googleapis/java-bigquerystorage/commit/036d2e6606fa05c02f87dae8b5a8db5a70453e55)) + + +### Dependencies + +* Update core dependencies to v1.26.0 ([#2650](https://github.com/googleapis/java-bigquerystorage/issues/2650)) ([1a162bc](https://github.com/googleapis/java-bigquerystorage/commit/1a162bcdf908487eec847d95564c64e07861c37e)) +* Update core dependencies to v1.27.0 ([#2654](https://github.com/googleapis/java-bigquerystorage/issues/2654)) ([521396a](https://github.com/googleapis/java-bigquerystorage/commit/521396a666c460ce5c7b5eff2451ad186c48c967)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.42.3 ([#2643](https://github.com/googleapis/java-bigquerystorage/issues/2643)) ([c65bec0](https://github.com/googleapis/java-bigquerystorage/commit/c65bec06acb589ad087f6287518dfa99f3b95e78)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.0 ([#2655](https://github.com/googleapis/java-bigquerystorage/issues/2655)) ([289fe16](https://github.com/googleapis/java-bigquerystorage/commit/289fe161a82a2a786a1bf68b3805a8f094e55384)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.1 ([#2661](https://github.com/googleapis/java-bigquerystorage/issues/2661)) ([286b5be](https://github.com/googleapis/java-bigquerystorage/commit/286b5bea3922efea7db61ff2051d495d5e598053)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.32.0 ([#2657](https://github.com/googleapis/java-bigquerystorage/issues/2657)) ([480b373](https://github.com/googleapis/java-bigquerystorage/commit/480b3739372908c8140bc804979d4b7f0be102ef)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.1 ([#2660](https://github.com/googleapis/java-bigquerystorage/issues/2660)) ([95a9977](https://github.com/googleapis/java-bigquerystorage/commit/95a9977c963864cfee24a38cc644920bd72f18cc)) + +## [3.9.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.9.0...v3.9.1) (2024-09-11) + + +### Dependencies + +* Update core dependencies to v1.25.0 ([#2629](https://github.com/googleapis/java-bigquerystorage/issues/2629)) ([904a439](https://github.com/googleapis/java-bigquerystorage/commit/904a439a62582ea765b5766f60fa7b055d9f3ee9)) +* Update dependency 
com.google.cloud:google-cloud-bigquery to v2.42.1 ([#2623](https://github.com/googleapis/java-bigquerystorage/issues/2623)) ([853be58](https://github.com/googleapis/java-bigquerystorage/commit/853be584d5622ae8d87f43467a6c0df66844df8a)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.42.2 ([#2626](https://github.com/googleapis/java-bigquerystorage/issues/2626)) ([6a2fcac](https://github.com/googleapis/java-bigquerystorage/commit/6a2fcac7ef80bd3ec4d84400c139309d3551360d)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.35.0 ([#2633](https://github.com/googleapis/java-bigquerystorage/issues/2633)) ([7f6b28d](https://github.com/googleapis/java-bigquerystorage/commit/7f6b28da1efc90840c26152c7118c89153a43162)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.42.1 ([#2631](https://github.com/googleapis/java-bigquerystorage/issues/2631)) ([b4b7cca](https://github.com/googleapis/java-bigquerystorage/commit/b4b7ccae25bbe606d44c330ece7e268678697b68)) +* Update dependency io.opentelemetry:opentelemetry-exporter-logging to v1.42.1 ([#2632](https://github.com/googleapis/java-bigquerystorage/issues/2632)) ([3cf4182](https://github.com/googleapis/java-bigquerystorage/commit/3cf4182747f342e58253f9d6bd2205df4008171c)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.3 ([#2635](https://github.com/googleapis/java-bigquerystorage/issues/2635)) ([b16b4b7](https://github.com/googleapis/java-bigquerystorage/commit/b16b4b7c1bf8d9bb5f0830abe1b47ded95596ca7)) + +## [3.9.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.8.1...v3.9.0) (2024-08-27) + + +### Features + +* V1alpha client generation ([#2615](https://github.com/googleapis/java-bigquerystorage/issues/2615)) ([061691c](https://github.com/googleapis/java-bigquerystorage/commit/061691ccdfe56ae0813e376b8f1946cd2a41617b)) + + +### Dependencies + +* Update dependency com.google.http-client:google-http-client to v1.45.0 ([#2616](https://github.com/googleapis/java-bigquerystorage/issues/2616)) ([93b30f9](https://github.com/googleapis/java-bigquerystorage/commit/93b30f91cbe103ab44d10ca64cf344304f342600)) + +## [3.8.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.8.0...v3.8.1) (2024-08-20) + + +### Bug Fixes + +* Github workflow vulnerable to script injection ([#2600](https://github.com/googleapis/java-bigquerystorage/issues/2600)) ([9ce25b6](https://github.com/googleapis/java-bigquerystorage/commit/9ce25b6c96bcb5b89f69b8deee65c80c4545758f)) +* RequestProfilerTest.java regex in deep-preserve list ([#2589](https://github.com/googleapis/java-bigquerystorage/issues/2589)) ([e62ac66](https://github.com/googleapis/java-bigquerystorage/commit/e62ac664fb8e7056481ad29547bb33ae73ad2ef0)) + + +### Dependencies + +* Update core dependencies to v1.24.1 ([#2604](https://github.com/googleapis/java-bigquerystorage/issues/2604)) ([eaac3dc](https://github.com/googleapis/java-bigquerystorage/commit/eaac3dc886fe2b4cdcc8cca71fdba4b8055d70f1)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.42.0 ([#2586](https://github.com/googleapis/java-bigquerystorage/issues/2586)) ([8893d43](https://github.com/googleapis/java-bigquerystorage/commit/8893d435597dd393f39225eaa186bfb637240816)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.34.0 ([#2608](https://github.com/googleapis/java-bigquerystorage/issues/2608)) ([b4861b4](https://github.com/googleapis/java-bigquerystorage/commit/b4861b43f873037b8e20da445f0d6e125eab01b9)) +* Update dependency 
io.opentelemetry:opentelemetry-bom to v1.41.0 ([#2603](https://github.com/googleapis/java-bigquerystorage/issues/2603)) ([896903a](https://github.com/googleapis/java-bigquerystorage/commit/896903ac4ef5cbc315e6e0a6b1d882649f134cab)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.0 ([#2605](https://github.com/googleapis/java-bigquerystorage/issues/2605)) ([754e883](https://github.com/googleapis/java-bigquerystorage/commit/754e88326d99c1baa191eba511be49a28953632c)) + +## [3.8.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.7.0...v3.8.0) (2024-07-26) + + +### Features + +* Add OpenTelemetry counters for sent and acked messages ([#2532](https://github.com/googleapis/java-bigquerystorage/issues/2532)) ([2fc5c55](https://github.com/googleapis/java-bigquerystorage/commit/2fc5c5544cb32343a2ecd46445f7118e7254de8d)) + + +### Bug Fixes + +* Add `RequestProfile` classes to `deep-preserve` list ([#2574](https://github.com/googleapis/java-bigquerystorage/issues/2574)) ([2141f89](https://github.com/googleapis/java-bigquerystorage/commit/2141f89538e698eff64a665e34ad32b181b7c04a)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.33.0 ([#2577](https://github.com/googleapis/java-bigquerystorage/issues/2577)) ([5c143a0](https://github.com/googleapis/java-bigquerystorage/commit/5c143a0d0e165fcacc5cf00dee5e1a391726dc5c)) + +## [3.7.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.6.1...v3.7.0) (2024-07-19) + + +### Features + +* Add a profiler for request execution details for the Write API connection worker ([#2555](https://github.com/googleapis/java-bigquerystorage/issues/2555)) ([5691bd5](https://github.com/googleapis/java-bigquerystorage/commit/5691bd54a5f31bb11bcb610a2e793259fa064656)) +* Enable hermetic library generation ([#2515](https://github.com/googleapis/java-bigquerystorage/issues/2515)) ([e1b14c1](https://github.com/googleapis/java-bigquerystorage/commit/e1b14c133a7fd75c7ddf1019f0e947d8dce30538)) +* Wire and expose the profiler API to the StreamWriter/JsonStreamWriter ([#2561](https://github.com/googleapis/java-bigquerystorage/issues/2561)) ([16f19dd](https://github.com/googleapis/java-bigquerystorage/commit/16f19ddb111ff145008d89dddc159bb1a0cb32f4)) + + +### Bug Fixes + +* Correct hermetic library generation script path ([#2545](https://github.com/googleapis/java-bigquerystorage/issues/2545)) ([55cc139](https://github.com/googleapis/java-bigquerystorage/commit/55cc139ab5d66ef44b6ff4c71477be1f4008bd44)) +* Remove singleton access to the request profiler. Use an instance of the hook instead. 
([#2567](https://github.com/googleapis/java-bigquerystorage/issues/2567)) ([2f45fa2](https://github.com/googleapis/java-bigquerystorage/commit/2f45fa2d16eed5fb3a692657142b058f06115d49)) +* Using context from call in ReadRowsRetryingCallable ([#2560](https://github.com/googleapis/java-bigquerystorage/issues/2560)) ([eeb19b7](https://github.com/googleapis/java-bigquerystorage/commit/eeb19b72629f28fd740c61f60cc8856577f4b8a9)) + + +### Dependencies + +* Update core dependencies to v1.24.0 ([#2554](https://github.com/googleapis/java-bigquerystorage/issues/2554)) ([4797801](https://github.com/googleapis/java-bigquerystorage/commit/4797801120935def1b38834fb752cf552a5c73c3)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.41.0 ([#2539](https://github.com/googleapis/java-bigquerystorage/issues/2539)) ([5d17db5](https://github.com/googleapis/java-bigquerystorage/commit/5d17db52f509e1218f4f4cc0d0295f2e51bad3e1)) +* Update dependency com.google.truth:truth to v1.4.3 ([#2542](https://github.com/googleapis/java-bigquerystorage/issues/2542)) ([f45bf86](https://github.com/googleapis/java-bigquerystorage/commit/f45bf868dfda6b7662617c3259ad637383ffea7e)) +* Update dependency com.google.truth:truth to v1.4.4 ([#2557](https://github.com/googleapis/java-bigquerystorage/issues/2557)) ([41fd95c](https://github.com/googleapis/java-bigquerystorage/commit/41fd95cc37272d094400c49f2114158adb52a95c)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.40.0 ([#2552](https://github.com/googleapis/java-bigquerystorage/issues/2552)) ([ac26913](https://github.com/googleapis/java-bigquerystorage/commit/ac269139bdd53ecb8b453da99bdee68b5c1256a9)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.3 ([#2543](https://github.com/googleapis/java-bigquerystorage/issues/2543)) ([f36d734](https://github.com/googleapis/java-bigquerystorage/commit/f36d734120496d61123c48dab47fb0fb3b134f61)) + +## [3.6.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.6.0...v3.6.1) (2024-06-25) + + +### Dependencies + +* Update actions/checkout digest to 692973e ([#2530](https://github.com/googleapis/java-bigquerystorage/issues/2530)) ([3f2e689](https://github.com/googleapis/java-bigquerystorage/commit/3f2e6896ec208bcc850af7b9e54804637e214206)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.40.3 ([#2528](https://github.com/googleapis/java-bigquerystorage/issues/2528)) ([952af9e](https://github.com/googleapis/java-bigquerystorage/commit/952af9ef73253fc7f77c8700a06b9df446c3e981)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.32.0 ([#2535](https://github.com/googleapis/java-bigquerystorage/issues/2535)) ([0a9a0c0](https://github.com/googleapis/java-bigquerystorage/commit/0a9a0c01834c3a4e0f6bedc437c0cdb6f38362c1)) + +## [3.6.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.5.2...v3.6.0) (2024-06-11) + + +### Features + +* Add ability to write Range values with JSONStreamWriter ([#2498](https://github.com/googleapis/java-bigquerystorage/issues/2498)) ([a5e62be](https://github.com/googleapis/java-bigquerystorage/commit/a5e62bedf35238f440f7b52dc3e1d3c972e4acdd)) +* Add instrumentation for a couple of OpenTelemetry metrics ([#2501](https://github.com/googleapis/java-bigquerystorage/issues/2501)) ([195ea96](https://github.com/googleapis/java-bigquerystorage/commit/195ea96a68cc117974a8a74ae1c031ca30a6ae21)) + + +### Dependencies + +* Update actions/checkout digest to a5ac7e5 
([#2508](https://github.com/googleapis/java-bigquerystorage/issues/2508)) ([e84e877](https://github.com/googleapis/java-bigquerystorage/commit/e84e877f45e76c14102910d35b5818b49f2b9405)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.40.2 ([#2513](https://github.com/googleapis/java-bigquerystorage/issues/2513)) ([09e3b2f](https://github.com/googleapis/java-bigquerystorage/commit/09e3b2fa5792d5c88239e6713f826ff44a6293fc)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.31.0 ([#2522](https://github.com/googleapis/java-bigquerystorage/issues/2522)) ([72e1450](https://github.com/googleapis/java-bigquerystorage/commit/72e14509bc8f672af082d1dbd3f69e44e259b75c)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.39.0 ([#2524](https://github.com/googleapis/java-bigquerystorage/issues/2524)) ([25341d2](https://github.com/googleapis/java-bigquerystorage/commit/25341d20fa95feb164ae60d854d026a23882763c)) + +## [3.5.2](https://github.com/googleapis/java-bigquerystorage/compare/v3.5.1...v3.5.2) (2024-05-24) + + +### Bug Fixes + +* Reset send timestamp each time a request is sent ([#2499](https://github.com/googleapis/java-bigquerystorage/issues/2499)) ([b1b62b1](https://github.com/googleapis/java-bigquerystorage/commit/b1b62b1a8b89e8b5b950e0f5e35f385c35bae2da)) + + +### Dependencies + +* Update actions/checkout digest to 0ad4b8f ([#2496](https://github.com/googleapis/java-bigquerystorage/issues/2496)) ([22e8f62](https://github.com/googleapis/java-bigquerystorage/commit/22e8f62940cedfa051b77383773324bbcc79bb95)) +* Update actions/checkout digest to 44c2b7a ([#2494](https://github.com/googleapis/java-bigquerystorage/issues/2494)) ([87b435a](https://github.com/googleapis/java-bigquerystorage/commit/87b435a5add55ba7f0cad52c59df5c67bc0dfb70)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.40.0 ([#2488](https://github.com/googleapis/java-bigquerystorage/issues/2488)) ([85f7f05](https://github.com/googleapis/java-bigquerystorage/commit/85f7f05a249b9be3ab29bff0e4bba26032543423)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.40.1 ([#2492](https://github.com/googleapis/java-bigquerystorage/issues/2492)) ([f6c6f02](https://github.com/googleapis/java-bigquerystorage/commit/f6c6f027d63d0d3d74d7791affd98e7ee359aabc)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.1 ([#2502](https://github.com/googleapis/java-bigquerystorage/issues/2502)) ([ed4d1a3](https://github.com/googleapis/java-bigquerystorage/commit/ed4d1a38adee35450b73aefa21e25d13e8bf8469)) +* Update dependency com.google.http-client:google-http-client to v1.44.2 ([#2503](https://github.com/googleapis/java-bigquerystorage/issues/2503)) ([66840e6](https://github.com/googleapis/java-bigquerystorage/commit/66840e6acd832702157d434762eef66e00c4c5df)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.2 ([#2504](https://github.com/googleapis/java-bigquerystorage/issues/2504)) ([bebbd21](https://github.com/googleapis/java-bigquerystorage/commit/bebbd218623554bae32eeeda81e3c9f166d31b93)) + +## [3.5.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.5.0...v3.5.1) (2024-05-06) + + +### Dependencies + +* Update actions/checkout digest to 0ad4b8f ([#2474](https://github.com/googleapis/java-bigquerystorage/issues/2474)) ([d8d5278](https://github.com/googleapis/java-bigquerystorage/commit/d8d5278ca54317a599e7c8b7c661eedd075f6a74)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.39.1 
([#2480](https://github.com/googleapis/java-bigquerystorage/issues/2480)) ([2c6bcbb](https://github.com/googleapis/java-bigquerystorage/commit/2c6bcbbe727518b1a1d784d5d9f9653af5759886)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.0 ([#2483](https://github.com/googleapis/java-bigquerystorage/issues/2483)) ([8007acf](https://github.com/googleapis/java-bigquerystorage/commit/8007acfdb1965fa5a6c9308189c92ae0f523faad)) + +## [3.5.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.4.0...v3.5.0) (2024-04-26) + + +### Features + +* Add `libraries_bom_version` in metadata ([#1956](https://github.com/googleapis/java-bigquerystorage/issues/1956)) ([#2463](https://github.com/googleapis/java-bigquerystorage/issues/2463)) ([b35bd4a](https://github.com/googleapis/java-bigquerystorage/commit/b35bd4a631ad6411531cd9056d01e829a0863b39)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.39.0 ([#2467](https://github.com/googleapis/java-bigquerystorage/issues/2467)) ([e2bc82d](https://github.com/googleapis/java-bigquerystorage/commit/e2bc82dde45c8c8e01bbe221f5817b02e9c5b7ed)) + + +### Dependencies + +* Update actions/checkout digest to 1d96c77 ([#2469](https://github.com/googleapis/java-bigquerystorage/issues/2469)) ([8efb813](https://github.com/googleapis/java-bigquerystorage/commit/8efb8131ff89b57509b4b122c75f765c62514b1c)) +* Update arrow.version to v16 ([#2468](https://github.com/googleapis/java-bigquerystorage/issues/2468)) ([d143c65](https://github.com/googleapis/java-bigquerystorage/commit/d143c65b6c92b98d321408458a6b2bc18e42593b)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.38.2 ([#2453](https://github.com/googleapis/java-bigquerystorage/issues/2453)) ([a054ddd](https://github.com/googleapis/java-bigquerystorage/commit/a054dddb20962c610a7af5c8cd76c93082df55fa)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.39.0 ([#2473](https://github.com/googleapis/java-bigquerystorage/issues/2473)) ([4eadc4a](https://github.com/googleapis/java-bigquerystorage/commit/4eadc4a5e63acf2daba7589f804b5facad1452fa)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#2455](https://github.com/googleapis/java-bigquerystorage/issues/2455)) ([80269b0](https://github.com/googleapis/java-bigquerystorage/commit/80269b0f861ab9f5a2426668e10b35e963019395)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.29.0 ([#2465](https://github.com/googleapis/java-bigquerystorage/issues/2465)) ([c082c36](https://github.com/googleapis/java-bigquerystorage/commit/c082c363fc365962dc9654a9cefb7602711c03f8)) + +## [3.4.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.3.1...v3.4.0) (2024-03-19) + + +### Features + +* Add short, integer, long, boolean conversions into string ([#2437](https://github.com/googleapis/java-bigquerystorage/issues/2437)) ([4f4216e](https://github.com/googleapis/java-bigquerystorage/commit/4f4216e21b1949da096c9d180c64086ba1edfd8f)) +* Provide the default stream name ([#2444](https://github.com/googleapis/java-bigquerystorage/issues/2444)) ([fe9c3ae](https://github.com/googleapis/java-bigquerystorage/commit/fe9c3ae6d9459cf5273cf9440bd6eecc27b71a7d)) + + +### Bug Fixes + +* Accept default stream name ([#2432](https://github.com/googleapis/java-bigquerystorage/issues/2432)) ([d58a314](https://github.com/googleapis/java-bigquerystorage/commit/d58a3146458a4593876e1bb62d3c2b6d0bfb1c05)) +* Add version id to the client lib header 
([#2447](https://github.com/googleapis/java-bigquerystorage/issues/2447)) ([43fd699](https://github.com/googleapis/java-bigquerystorage/commit/43fd699798db8053c2248d3d0506387a1f695b5f)) +* Also shut down the stream connection in case the timeout exception is thrown ([#2445](https://github.com/googleapis/java-bigquerystorage/issues/2445)) ([6e267fb](https://github.com/googleapis/java-bigquerystorage/commit/6e267fb4aaadf8bd61192a709b97cb9b799983db)) +* Fix WriteToDefaultStream example code to close the client properly ([#2433](https://github.com/googleapis/java-bigquerystorage/issues/2433)) ([d8a52f8](https://github.com/googleapis/java-bigquerystorage/commit/d8a52f886aef174bf01147e81728de6b2f09f4c3)) + + +### Dependencies + +* Update arrow.version to v15.0.1 ([#2442](https://github.com/googleapis/java-bigquerystorage/issues/2442)) ([d629194](https://github.com/googleapis/java-bigquerystorage/commit/d629194c97b75279cfa07e8dc4520868375d1c3e)) +* Update arrow.version to v15.0.2 ([#2449](https://github.com/googleapis/java-bigquerystorage/issues/2449)) ([27ded96](https://github.com/googleapis/java-bigquerystorage/commit/27ded96d7935467f61d0aefe2bd524590ea3e973)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.38.1 ([#2434](https://github.com/googleapis/java-bigquerystorage/issues/2434)) ([d99b6bd](https://github.com/googleapis/java-bigquerystorage/commit/d99b6bd31653bcc07302c089f87af88796af5bad)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#2448](https://github.com/googleapis/java-bigquerystorage/issues/2448)) ([6ae3d07](https://github.com/googleapis/java-bigquerystorage/commit/6ae3d07ffda047bd8288bd00d01f14aa37ce36f8)) + +## [3.3.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.3.0...v3.3.1) (2024-03-06) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.27.0 ([#2425](https://github.com/googleapis/java-bigquerystorage/issues/2425)) ([488d282](https://github.com/googleapis/java-bigquerystorage/commit/488d28287fdaefff02e7ad9f9f7c8da6ac873671)) + +## [3.3.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.2.0...v3.3.0) (2024-03-04) + + +### Features + +* Add RetrySettings usage to Write API samples. ([#2419](https://github.com/googleapis/java-bigquerystorage/issues/2419)) ([5b000d0](https://github.com/googleapis/java-bigquerystorage/commit/5b000d0a4e953649ca4e44bffd3ba25c288e70e4)) +* Add the RANGE type to the google.cloud.bigquery.storage.v1.TableFieldSchema ([#2413](https://github.com/googleapis/java-bigquerystorage/issues/2413)) ([6aa92b5](https://github.com/googleapis/java-bigquerystorage/commit/6aa92b5d03eed548de9e89b0731707f92c373ce3)) +* Next release from main branch is 2.48.0 ([#2420](https://github.com/googleapis/java-bigquerystorage/issues/2420)) ([2dd8efc](https://github.com/googleapis/java-bigquerystorage/commit/2dd8efc4a21f186c20e86304092d22fd574e822e)) + + +### Bug Fixes + +* Fix issue where Universe Domain is not correctly set. 
([#2423](https://github.com/googleapis/java-bigquerystorage/issues/2423)) ([b7ebd73](https://github.com/googleapis/java-bigquerystorage/commit/b7ebd73754fe51a4835f676e429b95b80f8114a5)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.38.0 ([#2417](https://github.com/googleapis/java-bigquerystorage/issues/2417)) ([8184a0f](https://github.com/googleapis/java-bigquerystorage/commit/8184a0f74e98340fe62621957f3ac78ad70c9edb)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.38.0 ([#2418](https://github.com/googleapis/java-bigquerystorage/issues/2418)) ([4d5eb73](https://github.com/googleapis/java-bigquerystorage/commit/4d5eb7343cdf5fd617a2da120642bfd678d58f94)) +* Update dependency com.google.truth:truth to v1.4.1 ([#2412](https://github.com/googleapis/java-bigquerystorage/issues/2412)) ([c2dcb73](https://github.com/googleapis/java-bigquerystorage/commit/c2dcb73c54ad5dcb68ce18741efaf479298373e1)) +* Update dependency com.google.truth:truth to v1.4.2 ([#2424](https://github.com/googleapis/java-bigquerystorage/issues/2424)) ([7a12de0](https://github.com/googleapis/java-bigquerystorage/commit/7a12de0ebc6a0a0f4bb438e3dcb36ee0759c0b94)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.1 ([#2414](https://github.com/googleapis/java-bigquerystorage/issues/2414)) ([dfa8d53](https://github.com/googleapis/java-bigquerystorage/commit/dfa8d532e5e1a28d644fefed6650ca1a2481a3a3)) +* Update dependency org.json:json to v20240303 ([#2426](https://github.com/googleapis/java-bigquerystorage/issues/2426)) ([2accca7](https://github.com/googleapis/java-bigquerystorage/commit/2accca72f2bd26c9a1cf1bd918961889b9ee3ace)) + + +### Documentation + +* Mark BigQueryWrite v1beta2 as deprecated ([#2421](https://github.com/googleapis/java-bigquerystorage/issues/2421)) ([07d98ab](https://github.com/googleapis/java-bigquerystorage/commit/07d98ab417ae9fd2f90aca4d6a1a3ff62ef0bc1d)) + +## [3.2.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.1.0...v3.2.0) (2024-02-15) + + +### Features + +* Introduce MaximumRequestCallbackWaitTimeExceededException ([#2401](https://github.com/googleapis/java-bigquerystorage/issues/2401)) ([0dbbfb8](https://github.com/googleapis/java-bigquerystorage/commit/0dbbfb80632e17b06bff04760e129df16149be4b)) + + +### Bug Fixes + +* Add client id and update trace id population for StreamWriter and JsonWriter ([#2389](https://github.com/googleapis/java-bigquerystorage/issues/2389)) ([4258af4](https://github.com/googleapis/java-bigquerystorage/commit/4258af42f8ab4494a254637fd018159ea426e0b7)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.35.0 ([#2407](https://github.com/googleapis/java-bigquerystorage/issues/2407)) ([59a1ede](https://github.com/googleapis/java-bigquerystorage/commit/59a1ede76bf37d4f71e8d0b89a1610277091f5ec)) + + +### Dependencies + +* Update core dependencies to v1.23.0 ([#2403](https://github.com/googleapis/java-bigquerystorage/issues/2403)) ([68eaae7](https://github.com/googleapis/java-bigquerystorage/commit/68eaae7851ccc2b698eb06a88c11719905fca049)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.37.1 ([#2402](https://github.com/googleapis/java-bigquerystorage/issues/2402)) ([ac13acc](https://github.com/googleapis/java-bigquerystorage/commit/ac13acc3b363ac954c2efdeff86fe7ca1eaeb0a8)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.37.2 ([#2409](https://github.com/googleapis/java-bigquerystorage/issues/2409)) 
([c95f4b4](https://github.com/googleapis/java-bigquerystorage/commit/c95f4b46e1040be606deace3bd0ec952d07cfc4d)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.25.0 ([#2406](https://github.com/googleapis/java-bigquerystorage/issues/2406)) ([2184b82](https://github.com/googleapis/java-bigquerystorage/commit/2184b82f322737b952d2ddab68bc37ded02f30a5)) +* Update dependency com.google.truth:truth to v1.4.0 ([#2394](https://github.com/googleapis/java-bigquerystorage/issues/2394)) ([3234fc9](https://github.com/googleapis/java-bigquerystorage/commit/3234fc93476342774a92febd14fb4a9a08bca330)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.0 ([#2392](https://github.com/googleapis/java-bigquerystorage/issues/2392)) ([9106284](https://github.com/googleapis/java-bigquerystorage/commit/91062842986166f156abbf5eefa986e3a134ef46)) +* Update dependency org.json:json to v20240205 ([#2398](https://github.com/googleapis/java-bigquerystorage/issues/2398)) ([7e6481d](https://github.com/googleapis/java-bigquerystorage/commit/7e6481d3daf1c3930c48670ccd774f1629e22360)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.2 ([#2397](https://github.com/googleapis/java-bigquerystorage/issues/2397)) ([f01182b](https://github.com/googleapis/java-bigquerystorage/commit/f01182b63fb31dd8a1d95f5e084b3f2ddb2e8580)) + +## [3.1.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.0.1...v3.1.0) (2024-02-02) + + +### Features + +* Enable instream retry for default streams when Multiplexing. ([#2376](https://github.com/googleapis/java-bigquerystorage/issues/2376)) ([9a18523](https://github.com/googleapis/java-bigquerystorage/commit/9a18523503dcfc0a787985075d7193efa472c32f)) + + +### Bug Fixes + +* Add a bit more message so customers are not going to be scared on retryable errors. 
([#2386](https://github.com/googleapis/java-bigquerystorage/issues/2386)) ([90331a3](https://github.com/googleapis/java-bigquerystorage/commit/90331a32447d28df99d483482ee222746da32acb))
+* Split connection pool based on credential ([#2388](https://github.com/googleapis/java-bigquerystorage/issues/2388)) ([08bc846](https://github.com/googleapis/java-bigquerystorage/commit/08bc846813e0b70bd940878658f9c33903c1416d))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.37.0 ([#2382](https://github.com/googleapis/java-bigquerystorage/issues/2382)) ([27e0bc7](https://github.com/googleapis/java-bigquerystorage/commit/27e0bc7c642de6c86e5c27834994bdd07f6e7279))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.24.0 ([#2390](https://github.com/googleapis/java-bigquerystorage/issues/2390)) ([3bb0149](https://github.com/googleapis/java-bigquerystorage/commit/3bb0149c9152905ee4c6c00fd3a55ba8502a9229))
+* Update dependency com.google.http-client:google-http-client to v1.44.1 ([#2384](https://github.com/googleapis/java-bigquerystorage/issues/2384)) ([6aecf34](https://github.com/googleapis/java-bigquerystorage/commit/6aecf34a001aea37ba0d0a2ebb0ed62619147d3d))
+
+## [3.0.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.0.0...v3.0.1) (2024-01-25)
+
+
+### Bug Fixes
+
+* **deps:** Update the Java code generator (gapic-generator-java) to 2.32.0 ([#2363](https://github.com/googleapis/java-bigquerystorage/issues/2363)) ([542b946](https://github.com/googleapis/java-bigquerystorage/commit/542b94606de6790f54ebeb546dc84f30faeb58b2))
+
+
+### Dependencies
+
+* Update arrow.version to v15 ([#2372](https://github.com/googleapis/java-bigquerystorage/issues/2372)) ([4b59eec](https://github.com/googleapis/java-bigquerystorage/commit/4b59eec20da86195ab5051e585bfe20d149af863))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.36.0 ([#2364](https://github.com/googleapis/java-bigquerystorage/issues/2364)) ([26f3429](https://github.com/googleapis/java-bigquerystorage/commit/26f342990fee9c12cb1ee3d016a6d2f434d7b85d))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.22.0 ([#2370](https://github.com/googleapis/java-bigquerystorage/issues/2370)) ([adcc49e](https://github.com/googleapis/java-bigquerystorage/commit/adcc49e96a445c4dd2c68be7c82a1d750579d0d3))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.23.0 ([#2378](https://github.com/googleapis/java-bigquerystorage/issues/2378)) ([2d8245f](https://github.com/googleapis/java-bigquerystorage/commit/2d8245fbf1143308410ea2a86ca98b0289a04c29))
+* Update dependency com.google.truth:truth to v1.3.0 ([#2371](https://github.com/googleapis/java-bigquerystorage/issues/2371)) ([8b39e8a](https://github.com/googleapis/java-bigquerystorage/commit/8b39e8af6a88312b39e1283a52bcd52c119c5f30))
+
+## [3.0.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.47.0...v3.0.0) (2024-01-11)
+
+
+### ⚠ BREAKING CHANGES
+
+* use table name create default write stream issue fix ([#2119](https://github.com/googleapis/java-bigquerystorage/issues/2119))
+
+### Features
+
+* Add configuration in json stream writer to configure the timeout in request waiting queue ([#2362](https://github.com/googleapis/java-bigquerystorage/issues/2362)) ([db527be](https://github.com/googleapis/java-bigquerystorage/commit/db527be024cd0e8c36d3b22e0b8f5c185c56f74f))
+* Adding deprecated message to v1beta2 manual client
([#2098](https://github.com/googleapis/java-bigquerystorage/issues/2098)) ([f150780](https://github.com/googleapis/java-bigquerystorage/commit/f150780454a29770781e3bdd1a21718efe92de56))
+* Exponentially backoff on INTERNAL errors for Default streams ([#2358](https://github.com/googleapis/java-bigquerystorage/issues/2358)) ([472a36f](https://github.com/googleapis/java-bigquerystorage/commit/472a36f214ea586c51a2e93ba8e4944892bbcb8c))
+* Use table name create default write stream issue fix ([#2119](https://github.com/googleapis/java-bigquerystorage/issues/2119)) ([91a2bec](https://github.com/googleapis/java-bigquerystorage/commit/91a2becf7a296b1fd33fe880f7a3b710eab4783c))
+
+
+### Bug Fixes
+
+* Add an e2e json test ([#2062](https://github.com/googleapis/java-bigquerystorage/issues/2062)) ([1893b3f](https://github.com/googleapis/java-bigquerystorage/commit/1893b3f6091972f99226dd28626bd1e638bef597))
+* Do not use regexp for BigQuerySchemaUtil#isProtoCompatible ([#2226](https://github.com/googleapis/java-bigquerystorage/issues/2226)) ([1741166](https://github.com/googleapis/java-bigquerystorage/commit/1741166d3aa6a3bab8dd8cb74b3be8f7b4ac4ac3))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.35.0 ([#2348](https://github.com/googleapis/java-bigquerystorage/issues/2348)) ([3395649](https://github.com/googleapis/java-bigquerystorage/commit/33956494008446fe194fed93cbc2136f4e9383be))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.21.0 ([#2361](https://github.com/googleapis/java-bigquerystorage/issues/2361)) ([95d4214](https://github.com/googleapis/java-bigquerystorage/commit/95d4214ae26468fa63fd101eeb0bb8d5ec656a75))
+* Update dependency com.google.truth:truth to v1.2.0 ([#2360](https://github.com/googleapis/java-bigquerystorage/issues/2360)) ([38a285f](https://github.com/googleapis/java-bigquerystorage/commit/38a285f33fa83d16637ec275d965e8f3ff880bd9))
+* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.28 ([#2349](https://github.com/googleapis/java-bigquerystorage/issues/2349)) ([6910035](https://github.com/googleapis/java-bigquerystorage/commit/69100356eb0bb920fcffb7b4e92c74722e84f8b1))
+* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.1 ([#2350](https://github.com/googleapis/java-bigquerystorage/issues/2350)) ([91caf73](https://github.com/googleapis/java-bigquerystorage/commit/91caf73f1896553a2a3ba4c8109cbbb93ba47f34))
+
+## [2.47.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.46.0...v2.47.0) (2023-12-01)
+
+
+### Features
+
+* Adding CDC sample ([#2263](https://github.com/googleapis/java-bigquerystorage/issues/2263)) ([ba134e4](https://github.com/googleapis/java-bigquerystorage/commit/ba134e403f7696d5e797faf7c07ff817b90f1085))
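The CDC sample entry above (#2263) is about upserts and deletes through the Write API. As a rough sketch: CDC rows carry a `_CHANGE_TYPE` pseudocolumn and are written to the table's default stream. The table is assumed to be CDC-enabled (primary key plus `max_staleness` configured), and all field names besides `_CHANGE_TYPE` are illustrative.

```java
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
import com.google.cloud.bigquery.storage.v1.TableName;
import org.json.JSONArray;
import org.json.JSONObject;

public class CdcUpsertSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.of("my-project", "my_dataset", "my_table"); // illustrative
    try (BigQueryWriteClient client = BigQueryWriteClient.create();
        JsonStreamWriter writer = JsonStreamWriter.newBuilder(table.toString(), client).build()) {
      // UPSERT replaces the row with the same primary key; DELETE removes it.
      JSONObject row = new JSONObject()
          .put("customer_id", 42)
          .put("name", "Ada")
          .put("_CHANGE_TYPE", "UPSERT");
      writer.append(new JSONArray().put(row)).get(); // wait for the AppendRowsResponse
    }
  }
}
```
+
+
+### Bug Fixes
+
+* An attempt to solve test failure in nightly build ([#2330](https://github.com/googleapis/java-bigquerystorage/issues/2330)) ([f77465e](https://github.com/googleapis/java-bigquerystorage/commit/f77465e094ca9b00fc2eb6882a69b9eb9dfd8edb))
+* Fix a test that is flaky ([#2340](https://github.com/googleapis/java-bigquerystorage/issues/2340)) ([cb6e2c9](https://github.com/googleapis/java-bigquerystorage/commit/cb6e2c97c854d89c2679d17a24f021a676f25039))
+* Fix the write api integration test ([#2333](https://github.com/googleapis/java-bigquerystorage/issues/2333)) ([a69bec9](https://github.com/googleapis/java-bigquerystorage/commit/a69bec9c017509ef3beb975e2b6d929c12420c0c))
+
+
+### Dependencies
+
+* Update dependency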
com.google.cloud:google-cloud-bigquery to v2.34.2 ([#2313](https://github.com/googleapis/java-bigquerystorage/issues/2313)) ([1e1b60c](https://github.com/googleapis/java-bigquerystorage/commit/1e1b60cbc036bd8ed48736e3c53a10bc3a76c821)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.20.0 ([#2336](https://github.com/googleapis/java-bigquerystorage/issues/2336)) ([6ada6c5](https://github.com/googleapis/java-bigquerystorage/commit/6ada6c5fd411576c2c27ab94e7ef47f7ba02629b)) + +## [2.46.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.45.0...v2.46.0) (2023-11-15) + + +### Features + +* Add integration tests with RetrySettings enabled. ([#2275](https://github.com/googleapis/java-bigquerystorage/issues/2275)) ([179193a](https://github.com/googleapis/java-bigquerystorage/commit/179193a18ba31c4c82946dc48671512fdac335c8)) +* Add new configuration for nightly retry-related tests. ([#2319](https://github.com/googleapis/java-bigquerystorage/issues/2319)) ([e78b841](https://github.com/googleapis/java-bigquerystorage/commit/e78b84151d1e7f9a4cfc6f252b6215fce4cec670)) +* Enable 20MB request limit, this feature is allowlist only. ([#2311](https://github.com/googleapis/java-bigquerystorage/issues/2311)) ([75c2552](https://github.com/googleapis/java-bigquerystorage/commit/75c2552a2ece47a8e14ad4932d4fcc36c8005cbe)) + + +### Bug Fixes + +* Add comment/documentation for setRetrySettings ([#2309](https://github.com/googleapis/java-bigquerystorage/issues/2309)) ([664b550](https://github.com/googleapis/java-bigquerystorage/commit/664b550258dbe6037972b4a94eb41acd254849c5)) +* Create next attempt after first attempt to initialize exponential backoff settings. ([#2316](https://github.com/googleapis/java-bigquerystorage/issues/2316)) ([e5884cc](https://github.com/googleapis/java-bigquerystorage/commit/e5884cc0a28a5b3fc6aab6854c6b435431524d42)) +* Enable request limit e2e test. 
([#2320](https://github.com/googleapis/java-bigquerystorage/issues/2320)) ([9d8c368](https://github.com/googleapis/java-bigquerystorage/commit/9d8c36859d5dd39f2e5fad26b692aa95308b1f5e)) + + +### Dependencies + +* Update actions/github-script action to v7 ([#2317](https://github.com/googleapis/java-bigquerystorage/issues/2317)) ([f68064f](https://github.com/googleapis/java-bigquerystorage/commit/f68064f2991768656fdefdce7e089313ee0f1e7e)) + +## [2.45.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.44.1...v2.45.0) (2023-11-07) + + +### Features + +* Add support for proto3 optional tag ([#2295](https://github.com/googleapis/java-bigquerystorage/issues/2295)) ([bc88aa5](https://github.com/googleapis/java-bigquerystorage/commit/bc88aa57f55b2c3605cbb9c66f2e66daa695c20c)) + + +### Bug Fixes + +* Add native image configurations for com.google.rpc classes ([#2305](https://github.com/googleapis/java-bigquerystorage/issues/2305)) ([3a771fd](https://github.com/googleapis/java-bigquerystorage/commit/3a771fded7c0f5243be1e4c76e7229f15eda95fa)) +* **bigquery:** Make exponential backoff retry second based ([#2212](https://github.com/googleapis/java-bigquerystorage/issues/2212)) ([eff4a09](https://github.com/googleapis/java-bigquerystorage/commit/eff4a09aece936dfee7fff46e7bd031ec780ab96)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.34.0 ([#2291](https://github.com/googleapis/java-bigquerystorage/issues/2291)) ([5377f79](https://github.com/googleapis/java-bigquerystorage/commit/5377f79300e38b192d36fe2e4ee42b34b8173aa1)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.34.1 ([#2308](https://github.com/googleapis/java-bigquerystorage/issues/2308)) ([6aee78f](https://github.com/googleapis/java-bigquerystorage/commit/6aee78f9d0f1918056d8fd2dd74bd0934733d496)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.19.0 ([#2300](https://github.com/googleapis/java-bigquerystorage/issues/2300)) ([1ddd11c](https://github.com/googleapis/java-bigquerystorage/commit/1ddd11c8910df6fc277d7bdd6856f85695a43009)) + +## [2.44.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.44.0...v2.44.1) (2023-10-25) + + +### Dependencies + +* Update actions/checkout digest to b4ffde6 ([#2281](https://github.com/googleapis/java-bigquerystorage/issues/2281)) ([8c78515](https://github.com/googleapis/java-bigquerystorage/commit/8c78515979e22a802e70def26dafa9512d785268)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.33.2 ([#2274](https://github.com/googleapis/java-bigquerystorage/issues/2274)) ([d55ce5e](https://github.com/googleapis/java-bigquerystorage/commit/d55ce5edd6f728cfce7ce0d59cf29d6e9d14d80a)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.18.0 ([#2282](https://github.com/googleapis/java-bigquerystorage/issues/2282)) ([12f3fce](https://github.com/googleapis/java-bigquerystorage/commit/12f3fce59ab95b6b0319c080b86916586e35ce00)) +* Update dependency org.json:json to v20231013 - abandoned ([#2276](https://github.com/googleapis/java-bigquerystorage/issues/2276)) ([0c5a60b](https://github.com/googleapis/java-bigquerystorage/commit/0c5a60b9ead20cbceda08805229ad6fa3f91c7e3)) +* Update dependency org.json:json to v20231013 [security] ([#2278](https://github.com/googleapis/java-bigquerystorage/issues/2278)) ([2b7887d](https://github.com/googleapis/java-bigquerystorage/commit/2b7887d0bab1e0534f1616f3b64a28d7a57518d6)) + +## 
[2.44.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.43.0...v2.44.0) (2023-10-10) + + +### Features + +* Add in-stream retry logic for retryable and quota errors ([#2243](https://github.com/googleapis/java-bigquerystorage/issues/2243)) ([6435a04](https://github.com/googleapis/java-bigquerystorage/commit/6435a0491827779b01dd0c3cf184f6578bf33f3e)) + + +### Dependencies + +* Bumping google-cloud-shared-config to v1.5.8 ([#2269](https://github.com/googleapis/java-bigquerystorage/issues/2269)) ([fb6e38d](https://github.com/googleapis/java-bigquerystorage/commit/fb6e38d575800ab4c7c16ae0545fdbd91ea358bd)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.33.1 ([#2258](https://github.com/googleapis/java-bigquerystorage/issues/2258)) ([f6cbea2](https://github.com/googleapis/java-bigquerystorage/commit/f6cbea204b5a414d8e2932ad2fd194996685ec39)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.17.0 ([#2266](https://github.com/googleapis/java-bigquerystorage/issues/2266)) ([827aaf7](https://github.com/googleapis/java-bigquerystorage/commit/827aaf70bc19de8d67d0f386085877eb5d2fbced)) + +## [2.43.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.42.0...v2.43.0) (2023-09-28) + + +### Features + +* Add sample code about default missingValueInterpretation ([#2249](https://github.com/googleapis/java-bigquerystorage/issues/2249)) ([ebedcc0](https://github.com/googleapis/java-bigquerystorage/commit/ebedcc07e400429fc6b80a4ad0543fe25eef970d)) + + +### Dependencies + +* Update actions/checkout digest to 8ade135 ([#2251](https://github.com/googleapis/java-bigquerystorage/issues/2251)) ([182e050](https://github.com/googleapis/java-bigquerystorage/commit/182e050d9929f9fb58694e76625b03bb54f67efe)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.32.0 ([#2246](https://github.com/googleapis/java-bigquerystorage/issues/2246)) ([893fcb9](https://github.com/googleapis/java-bigquerystorage/commit/893fcb9f6c4b2eb9814ea2597fe9aae95367b4cd)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.33.0 ([#2255](https://github.com/googleapis/java-bigquerystorage/issues/2255)) ([7689dee](https://github.com/googleapis/java-bigquerystorage/commit/7689dee5f70a144efb9eb9be1a058d11d7e3c05d)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.16.1 ([#2250](https://github.com/googleapis/java-bigquerystorage/issues/2250)) ([43d96d2](https://github.com/googleapis/java-bigquerystorage/commit/43d96d288207a607d168f604f190fb28b5eab132)) +* Update dependency org.apache.arrow:arrow-vector to v13 ([#2236](https://github.com/googleapis/java-bigquerystorage/issues/2236)) ([afde7ce](https://github.com/googleapis/java-bigquerystorage/commit/afde7ce1f48f1c7cdd4b06d4aabdaac9d367aa50)) +* Update dependency org.apache.avro:avro to v1.11.3 ([#2252](https://github.com/googleapis/java-bigquerystorage/issues/2252)) ([10b615b](https://github.com/googleapis/java-bigquerystorage/commit/10b615b49ba8889f7d051ac03d4751ace82b6823)) + +## [2.42.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.41.1...v2.42.0) (2023-09-13) + + +### Features + +* Add default_missing_value_interpretation field; indicate KMS_SERVICE_ERROR is retryable ([#2229](https://github.com/googleapis/java-bigquerystorage/issues/2229)) ([df686d6](https://github.com/googleapis/java-bigquerystorage/commit/df686d6ac51d182b52dbd1f5a69585bb605e9b94)) +* Expose settings to configure default missing value interpretation. 
([#2230](https://github.com/googleapis/java-bigquerystorage/issues/2230)) ([dc5ed73](https://github.com/googleapis/java-bigquerystorage/commit/dc5ed73f513a77939286d3c129fc26f039c23d5c))
+
+
+### Bug Fixes
+
+* Populate final status to initial request during connection shutdown ([#2228](https://github.com/googleapis/java-bigquerystorage/issues/2228)) ([9b9b5c0](https://github.com/googleapis/java-bigquerystorage/commit/9b9b5c09d7bc458493338eced8527a168fff0129))
+
+
+### Dependencies
+
+* Update actions/checkout action to v4 ([#2237](https://github.com/googleapis/java-bigquerystorage/issues/2237)) ([d5d739f](https://github.com/googleapis/java-bigquerystorage/commit/d5d739fe7624b74584c1272f13635f728fdf53d2))
+* Update arrow.version to v13 ([#2234](https://github.com/googleapis/java-bigquerystorage/issues/2234)) ([ac45c2a](https://github.com/googleapis/java-bigquerystorage/commit/ac45c2aa189fa0cba05f88486f44d3b1d6f761ca))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.31.1 ([#2225](https://github.com/googleapis/java-bigquerystorage/issues/2225)) ([5144c5a](https://github.com/googleapis/java-bigquerystorage/commit/5144c5ad1e107f96d2003064cd2823982ac0e360))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.31.2 ([#2241](https://github.com/googleapis/java-bigquerystorage/issues/2241)) ([91e3730](https://github.com/googleapis/java-bigquerystorage/commit/91e37303f57ec2e211a375652a8eca8b7d39d1e6))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.15.0 ([#2239](https://github.com/googleapis/java-bigquerystorage/issues/2239)) ([5352a7d](https://github.com/googleapis/java-bigquerystorage/commit/5352a7d7e1d7b01a33936adf7e204c5f49f0c230))
+* Update dependency org.apache.arrow:arrow-memory-netty to v13 ([#2235](https://github.com/googleapis/java-bigquerystorage/issues/2235)) ([7e50bef](https://github.com/googleapis/java-bigquerystorage/commit/7e50bef3fa3c92c94aeefedca0ae87c5132bb1be))
+
+## [2.41.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.41.0...v2.41.1) (2023-08-08)
+
+
+### Bug Fixes
+
+* Remove FAILED_PRECONDITION from connection retry code ([#2214](https://github.com/googleapis/java-bigquerystorage/issues/2214)) ([b8e8a24](https://github.com/googleapis/java-bigquerystorage/commit/b8e8a2489fbd68b24c39ca2d1d90cbb9fc3be715))
+* Remove pre launch comments from enableConnectionPool ([#2215](https://github.com/googleapis/java-bigquerystorage/issues/2215)) ([fb72c18](https://github.com/googleapis/java-bigquerystorage/commit/fb72c181cc07e8870c100bb63a334e3a6e3d1fa3))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.31.0 ([#2206](https://github.com/googleapis/java-bigquerystorage/issues/2206)) ([7d935f0](https://github.com/googleapis/java-bigquerystorage/commit/7d935f094ec679fb1c72541340625c6d05580496))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.14.0 ([#2218](https://github.com/googleapis/java-bigquerystorage/issues/2218)) ([4d51a01](https://github.com/googleapis/java-bigquerystorage/commit/4d51a016fb9e2cf65f3b9f490cf0f6802d8ec5de))
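The enableConnectionPool flag mentioned in the entry above and in the 2.41.0 section just below turns on connection multiplexing, so many default-stream writers share a pool of gRPC connections instead of holding one connection each. A minimal opt-in sketch, assuming the `setEnableConnectionPool` builder method and illustrative table coordinates:

```java
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
import com.google.cloud.bigquery.storage.v1.TableName;
import org.json.JSONArray;
import org.json.JSONObject;

public class ConnectionPoolSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.of("my-project", "my_dataset", "my_table"); // illustrative
    try (BigQueryWriteClient client = BigQueryWriteClient.create();
        JsonStreamWriter writer =
            JsonStreamWriter.newBuilder(table.toString(), client)
                // Writers to _default streams can then multiplex over shared
                // gRPC connections rather than one connection per writer.
                .setEnableConnectionPool(true)
                .build()) {
      writer.append(new JSONArray().put(new JSONObject().put("col", "value"))).get();
    }
  }
}
```

Note the 2.27.0 entry further down: the pool is rejected for explicit streams, so this only applies to default-stream writers.
+
+## [2.41.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.40.1...v2.41.0) (2023-07-24)
+
+
+### Features
+
+* Add enable connection pool to sample ([#2192](https://github.com/googleapis/java-bigquerystorage/issues/2192)) ([c2642de](https://github.com/googleapis/java-bigquerystorage/commit/c2642de07556381490c92eedf25b17d36218c2f8))
+* Support gRPC Compression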
([#2197](https://github.com/googleapis/java-bigquerystorage/issues/2197)) ([642e345](https://github.com/googleapis/java-bigquerystorage/commit/642e3452f3b490d6d85b61dfd1a1bb82e175d565)) + + +### Bug Fixes + +* Reduce StreamConnection visibility since it is not supposed to be public. ([#2196](https://github.com/googleapis/java-bigquerystorage/issues/2196)) ([addbcdf](https://github.com/googleapis/java-bigquerystorage/commit/addbcdf04e330a76e29e41b1e8f4ca04ad96ed00)) +* Update SchemaAwareStreamWriter comment to down recommend it. ([#2195](https://github.com/googleapis/java-bigquerystorage/issues/2195)) ([4897c05](https://github.com/googleapis/java-bigquerystorage/commit/4897c05aeb7cddff20fb530d64034cc143c24d91)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.30.1 ([#2191](https://github.com/googleapis/java-bigquerystorage/issues/2191)) ([da4f2e5](https://github.com/googleapis/java-bigquerystorage/commit/da4f2e5e801daad1136eb4a3e753e486cef194e7)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.1 ([#2199](https://github.com/googleapis/java-bigquerystorage/issues/2199)) ([fc8f4fb](https://github.com/googleapis/java-bigquerystorage/commit/fc8f4fbae333db08ae21730a61a8993c3e1f9897)) + +## [2.40.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.40.0...v2.40.1) (2023-07-18) + + +### Dependencies + +* Remove auto value annotation version ([#2188](https://github.com/googleapis/java-bigquerystorage/issues/2188)) ([9186f04](https://github.com/googleapis/java-bigquerystorage/commit/9186f04eefff3a483d39c4c5db7aeb3e213ed4ca)) + +## [2.40.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.39.1...v2.40.0) (2023-07-17) + + +### Features + +* Add ResourceExhausted to retryable error for Write API unary calls ([#2178](https://github.com/googleapis/java-bigquerystorage/issues/2178)) ([d9b526a](https://github.com/googleapis/java-bigquerystorage/commit/d9b526a2e4109ef5ed95fb74373f2f13b06c7c54)) +* Improve json to proto conversion by caching schema ([#2179](https://github.com/googleapis/java-bigquerystorage/issues/2179)) ([afc550a](https://github.com/googleapis/java-bigquerystorage/commit/afc550aeacb0e3f26440eeb70d2cebbf65922c07)) + + +### Bug Fixes + +* Interpret Integer and Float values for TIMESTAMP as microseconds ([#2175](https://github.com/googleapis/java-bigquerystorage/issues/2175)) ([e5bb5d0](https://github.com/googleapis/java-bigquerystorage/commit/e5bb5d099ea0272c4bd447b7f8fef5207c14ffc5)) +* Support DATETIME field that has a space between date and time and has only date ([#2176](https://github.com/googleapis/java-bigquerystorage/issues/2176)) ([494ce85](https://github.com/googleapis/java-bigquerystorage/commit/494ce8513e8925b4330a2bf45641ba38db625c1d)) + + +### Dependencies + +* Update dependency com.google.auto.value:auto-value to v1.10.2 ([#2171](https://github.com/googleapis/java-bigquerystorage/issues/2171)) ([721908d](https://github.com/googleapis/java-bigquerystorage/commit/721908d412f1d82aff9aed8edcf727fc5b1bf950)) +* Update dependency com.google.auto.value:auto-value-annotations to v1.10.2 ([#2172](https://github.com/googleapis/java-bigquerystorage/issues/2172)) ([8a51fae](https://github.com/googleapis/java-bigquerystorage/commit/8a51fae180ced3b362acc350999157d3d6e0da6a)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.29.0 ([#2168](https://github.com/googleapis/java-bigquerystorage/issues/2168)) 
([50ca432](https://github.com/googleapis/java-bigquerystorage/commit/50ca432854851f7cc89cb50a327d9641000b81ee)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.30.0 ([#2184](https://github.com/googleapis/java-bigquerystorage/issues/2184)) ([87f93a9](https://github.com/googleapis/java-bigquerystorage/commit/87f93a921c62cd71808cddc35382bbaabb7da54b)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 ([#2180](https://github.com/googleapis/java-bigquerystorage/issues/2180)) ([7ce19e7](https://github.com/googleapis/java-bigquerystorage/commit/7ce19e7a4ca47df9590c1023abcc459248b1fec2)) +* Update dependency org.apache.avro:avro to v1.11.2 ([#2177](https://github.com/googleapis/java-bigquerystorage/issues/2177)) ([75ce0b5](https://github.com/googleapis/java-bigquerystorage/commit/75ce0b5d7009bbb47b91c222390cfe864b8bd84e)) + +## [2.39.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.39.0...v2.39.1) (2023-06-22) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.27.1 ([#2150](https://github.com/googleapis/java-bigquerystorage/issues/2150)) ([da736a6](https://github.com/googleapis/java-bigquerystorage/commit/da736a65378d007930e0afb9246d0f53bb41e0c3)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.28.0 ([#2163](https://github.com/googleapis/java-bigquerystorage/issues/2163)) ([088219e](https://github.com/googleapis/java-bigquerystorage/commit/088219effe0528df7c998c6e71adc62025d3b204)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.12.0 ([#2160](https://github.com/googleapis/java-bigquerystorage/issues/2160)) ([069165a](https://github.com/googleapis/java-bigquerystorage/commit/069165aa9e3644ae84bcffa501bee175623ee9b3)) +* Update dependency com.google.http-client:google-http-client to v1.43.3 ([#2156](https://github.com/googleapis/java-bigquerystorage/issues/2156)) ([814e826](https://github.com/googleapis/java-bigquerystorage/commit/814e8261689351bc88202be37975e78415192de5)) +* Update dependency com.google.truth:truth to v1.1.5 ([#2153](https://github.com/googleapis/java-bigquerystorage/issues/2153)) ([bf9f1da](https://github.com/googleapis/java-bigquerystorage/commit/bf9f1da8a3de27a775e7c3a58076dd06b026c459)) +* Update dependency org.json:json to v20230618 ([#2154](https://github.com/googleapis/java-bigquerystorage/issues/2154)) ([8e7b42a](https://github.com/googleapis/java-bigquerystorage/commit/8e7b42aeac292b3291a505e2a456499553951a8f)) + +## [2.39.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.38.0...v2.39.0) (2023-06-22) + + +### Features + +* Add estimated physical file sizes to ReadAPI v1 ([#2157](https://github.com/googleapis/java-bigquerystorage/issues/2157)) ([fbf6bf6](https://github.com/googleapis/java-bigquerystorage/commit/fbf6bf66567644dd46a5062507f75951800e10a9)) + +## [2.38.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.37.2...v2.38.0) (2023-06-13) + + +### Features + +* Add storage error codes for KMS ([5d2d3c6](https://github.com/googleapis/java-bigquerystorage/commit/5d2d3c6fc6be14a3cb79cf0ce0e82b48699a97c4)) +* Add table sampling to ReadAPI v1 ([5d2d3c6](https://github.com/googleapis/java-bigquerystorage/commit/5d2d3c6fc6be14a3cb79cf0ce0e82b48699a97c4)) + + +### Bug Fixes + +* Update copyright year in Java license header replacement template ([#1810](https://github.com/googleapis/java-bigquerystorage/issues/1810)) ([#2138](https://github.com/googleapis/java-bigquerystorage/issues/2138)) 
([af99efe](https://github.com/googleapis/java-bigquerystorage/commit/af99efe938302e0d09c98308ad081244d9c5633e)) + + +### Dependencies + +* Update arrow.version to v12.0.1 ([#2143](https://github.com/googleapis/java-bigquerystorage/issues/2143)) ([7fb618e](https://github.com/googleapis/java-bigquerystorage/commit/7fb618e16b6a06278d511e771b8b46ebf4658aa9)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.27.0 ([#2131](https://github.com/googleapis/java-bigquerystorage/issues/2131)) ([e459348](https://github.com/googleapis/java-bigquerystorage/commit/e4593486959e86808255517861361805dc8769aa)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.11.0 ([#2137](https://github.com/googleapis/java-bigquerystorage/issues/2137)) ([5fe7cdb](https://github.com/googleapis/java-bigquerystorage/commit/5fe7cdb6c9b6899a13b0fdde0a5a436d68a509e2)) +* Update dependency org.apache.arrow:arrow-memory-netty to v12.0.1 ([#2144](https://github.com/googleapis/java-bigquerystorage/issues/2144)) ([e738bff](https://github.com/googleapis/java-bigquerystorage/commit/e738bff88e3efaf0f6beb5f2d93057a175671d3e)) +* Update dependency org.apache.arrow:arrow-vector to v12.0.1 ([#2145](https://github.com/googleapis/java-bigquerystorage/issues/2145)) ([34dfda6](https://github.com/googleapis/java-bigquerystorage/commit/34dfda6aeec700c0722ad559e71e85bdedd04bfb)) +* Update dependency org.mockito:mockito-core to v3.12.4 ([#2146](https://github.com/googleapis/java-bigquerystorage/issues/2146)) ([1434fc0](https://github.com/googleapis/java-bigquerystorage/commit/1434fc0b995f5d6c8039acca8ca530e9d11f490b)) + +## [2.37.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.37.1...v2.37.2) (2023-05-30) + + +### Bug Fixes + +* Pass the parameter value of enableConnectionPool instead of true always ([#2096](https://github.com/googleapis/java-bigquerystorage/issues/2096)) ([253678d](https://github.com/googleapis/java-bigquerystorage/commit/253678df630c717fbcf7018b3245aa56b5f22660)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.26.0 ([#2114](https://github.com/googleapis/java-bigquerystorage/issues/2114)) ([8b44534](https://github.com/googleapis/java-bigquerystorage/commit/8b4453476b17dcbb10faaa52c6a2b06506d89d62)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.26.1 ([#2128](https://github.com/googleapis/java-bigquerystorage/issues/2128)) ([62afd46](https://github.com/googleapis/java-bigquerystorage/commit/62afd46bb4cad09886c49bac88a2052c8754f8fe)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.10.1 ([#2121](https://github.com/googleapis/java-bigquerystorage/issues/2121)) ([3fabc5d](https://github.com/googleapis/java-bigquerystorage/commit/3fabc5d467becd3d9b816f9545e8a1e6dd09f682)) +* Update dependency com.google.http-client:google-http-client to v1.43.2 ([#2109](https://github.com/googleapis/java-bigquerystorage/issues/2109)) ([0d57daf](https://github.com/googleapis/java-bigquerystorage/commit/0d57daff0d113089ba1957753febeed92024e4c2)) +* Update dependency com.google.truth:truth to v1.1.4 ([#2126](https://github.com/googleapis/java-bigquerystorage/issues/2126)) ([a0be7ad](https://github.com/googleapis/java-bigquerystorage/commit/a0be7ad59b1604972b190befe2e4befd2e2c6431)) + +## [2.37.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.37.0...v2.37.1) (2023-05-25) + + +### Bug Fixes + +* Bug fix for streamWriter & jsonStreamWriter 
([#2122](https://github.com/googleapis/java-bigquerystorage/issues/2122)) ([36964a3](https://github.com/googleapis/java-bigquerystorage/commit/36964a39e0f2a7ede8c55c43313a32922c06fe15))
+
+## [2.37.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.36.1...v2.37.0) (2023-05-12)
+
+
+### Features
+
+* Adding setting KeepAlive to JsonWriter sample ([#2104](https://github.com/googleapis/java-bigquerystorage/issues/2104)) ([a5b95c1](https://github.com/googleapis/java-bigquerystorage/commit/a5b95c10df4ce34d542d3a3e9227522817538d10))
+* Improve client settings ([#2097](https://github.com/googleapis/java-bigquerystorage/issues/2097)) ([4fc2fac](https://github.com/googleapis/java-bigquerystorage/commit/4fc2fac3cc30d4d6f288c82cf1446d19fab95712))
+
+
+### Dependencies
+
+* Update arrow.version to v12 (major) ([#2099](https://github.com/googleapis/java-bigquerystorage/issues/2099)) ([e0c09d5](https://github.com/googleapis/java-bigquerystorage/commit/e0c09d57fdb80e81c3f4047cb845033a8bbe9542))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.25.0 ([#2094](https://github.com/googleapis/java-bigquerystorage/issues/2094)) ([6ca6677](https://github.com/googleapis/java-bigquerystorage/commit/6ca667703f480d50ec009af0b27c651d63bda0b5))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.9.0 ([#2108](https://github.com/googleapis/java-bigquerystorage/issues/2108)) ([af1001b](https://github.com/googleapis/java-bigquerystorage/commit/af1001b5f49e04e5d02057376c98363dbf6f9b9c))
+* Update dependency org.apache.arrow:arrow-memory-netty to v12 ([#2100](https://github.com/googleapis/java-bigquerystorage/issues/2100)) ([df7bda4](https://github.com/googleapis/java-bigquerystorage/commit/df7bda4da70c18fa1565fb261cf5e12820d51f62))
+* Update dependency org.apache.arrow:arrow-vector to v12 ([#2101](https://github.com/googleapis/java-bigquerystorage/issues/2101)) ([25c2682](https://github.com/googleapis/java-bigquerystorage/commit/25c26823ed07b9a77deec8e09a934f68b58813c2))
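The KeepAlive sample entry above (#2104) concerns keeping long-lived append connections from being dropped as idle. One plausible wiring is gRPC keep-alive on the write client's channel provider; the threeten-bp `Duration` overloads and the specific intervals are assumptions:

```java
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings;
import org.threeten.bp.Duration;

public class KeepAliveSketch {
  public static void main(String[] args) throws Exception {
    BigQueryWriteSettings settings =
        BigQueryWriteSettings.newBuilder()
            .setTransportChannelProvider(
                BigQueryWriteSettings.defaultGrpcTransportProviderBuilder()
                    // Ping the server every minute so idle append streams stay open.
                    .setKeepAliveTime(Duration.ofMinutes(1))
                    .setKeepAliveTimeout(Duration.ofMinutes(1))
                    .setKeepAliveWithoutCalls(true)
                    .build())
            .build();
    try (BigQueryWriteClient client = BigQueryWriteClient.create(settings)) {
      // Pass this client into JsonStreamWriter.newBuilder(...) as usual.
    }
  }
}
```
+
+## [2.36.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.36.0...v2.36.1) (2023-04-27)
+
+
+### Bug Fixes
+
+* Remove the 30 minutes wait trying to refresh schema.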
Customers s… ([#2088](https://github.com/googleapis/java-bigquerystorage/issues/2088)) ([3ec294f](https://github.com/googleapis/java-bigquerystorage/commit/3ec294f4b7db34bb44d862224189d8fc42821b7d)) + +## [2.36.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.35.0...v2.36.0) (2023-04-26) + + +### Features + +* Adding ExecutorProvider support while creating BigQueryReadClient ([#2072](https://github.com/googleapis/java-bigquerystorage/issues/2072)) ([9221e18](https://github.com/googleapis/java-bigquerystorage/commit/9221e1896f7af6c2dd23e92f6ea13d86c3368600)) + + +### Bug Fixes + +* Reduce timeout of waiting in queue from 15 minutes to 5 minutes and rephrase the log a bit ([#2084](https://github.com/googleapis/java-bigquerystorage/issues/2084)) ([fe25f38](https://github.com/googleapis/java-bigquerystorage/commit/fe25f38215ffaacd2cbe4af74c0de9ea7c053d91)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.24.5 ([#2081](https://github.com/googleapis/java-bigquerystorage/issues/2081)) ([362568d](https://github.com/googleapis/java-bigquerystorage/commit/362568d4d8e1cf5e1e2a1bbfe3513d440f9fb99f)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.8.0 ([#2087](https://github.com/googleapis/java-bigquerystorage/issues/2087)) ([1f79a96](https://github.com/googleapis/java-bigquerystorage/commit/1f79a9691ebb5f22077534fb7d266df830cb7ec2)) + +## [2.35.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.34.2...v2.35.0) (2023-04-13) + + +### Features + +* Add public api to stream writer to set the maximum wait time ([#2066](https://github.com/googleapis/java-bigquerystorage/issues/2066)) ([1e9a8ca](https://github.com/googleapis/java-bigquerystorage/commit/1e9a8cac19c3748515ebff7990d02fd576c7dd23)) +* Add sample about processing permanent writer failure ([#2057](https://github.com/googleapis/java-bigquerystorage/issues/2057)) ([8eda934](https://github.com/googleapis/java-bigquerystorage/commit/8eda9347a90f59ddcf99501f8b71ba17c5f3a143)) +* Add schema aware stream writer ([#2048](https://github.com/googleapis/java-bigquerystorage/issues/2048)) ([ad136b9](https://github.com/googleapis/java-bigquerystorage/commit/ad136b9fa25e774a33d02fc3a82a76fb1152b5c5)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.24.4 ([#2070](https://github.com/googleapis/java-bigquerystorage/issues/2070)) ([ce9e962](https://github.com/googleapis/java-bigquerystorage/commit/ce9e96209cbafd5a4daa981c5e5252272dc9811a)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.7.0 ([#2077](https://github.com/googleapis/java-bigquerystorage/issues/2077)) ([b5ea788](https://github.com/googleapis/java-bigquerystorage/commit/b5ea788df26122dcdf3c7104658cc8fd35a38f72)) + +## [2.34.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.34.1...v2.34.2) (2023-03-30) + + +### Bug Fixes + +* Correct AppendSerializtionError typo ([#2037](https://github.com/googleapis/java-bigquerystorage/issues/2037)) ([e67e913](https://github.com/googleapis/java-bigquerystorage/commit/e67e913f34fda4f4cc523c0248e5344232c0b736)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.24.0 ([#2054](https://github.com/googleapis/java-bigquerystorage/issues/2054)) ([e3156c7](https://github.com/googleapis/java-bigquerystorage/commit/e3156c7b525f7df2f3fe756f096e7fb1352fae8e)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.24.1 
([#2056](https://github.com/googleapis/java-bigquerystorage/issues/2056)) ([a989ac6](https://github.com/googleapis/java-bigquerystorage/commit/a989ac63d813cc98dcc13200a950fe3edad10bdf)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.24.3 ([#2058](https://github.com/googleapis/java-bigquerystorage/issues/2058)) ([9346667](https://github.com/googleapis/java-bigquerystorage/commit/934666737a92ec3220c6a186cc1af0f1adabb00c)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.6.0 ([#2063](https://github.com/googleapis/java-bigquerystorage/issues/2063)) ([965de7b](https://github.com/googleapis/java-bigquerystorage/commit/965de7bf78884cca30e6e6d672b74d734bda840d)) + +## [2.34.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.34.0...v2.34.1) (2023-03-21) + + +### Bug Fixes + +* Add service_yaml_parameters to `java_gapic_library` targets ([#2034](https://github.com/googleapis/java-bigquerystorage/issues/2034)) ([8dae87c](https://github.com/googleapis/java-bigquerystorage/commit/8dae87cff06c7d08b9d597be9c66de570739abf4)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.23.2 ([#2029](https://github.com/googleapis/java-bigquerystorage/issues/2029)) ([e9c0152](https://github.com/googleapis/java-bigquerystorage/commit/e9c0152c2b098d7fb2d1de4d535d636a3ac9f90e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.5.0 ([#2050](https://github.com/googleapis/java-bigquerystorage/issues/2050)) ([349092f](https://github.com/googleapis/java-bigquerystorage/commit/349092f28f6017b1df5b325e9a9036c311e70546)) +* Update dependency com.google.http-client:google-http-client to v1.43.1 ([#2038](https://github.com/googleapis/java-bigquerystorage/issues/2038)) ([5e865c9](https://github.com/googleapis/java-bigquerystorage/commit/5e865c9b3779f0d20080eead241f1b5858156880)) + +## [2.34.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.33.1...v2.34.0) (2023-03-10) + + +### Features + +* Add routing header for multiplexed connection ([#2035](https://github.com/googleapis/java-bigquerystorage/issues/2035)) ([1f2752f](https://github.com/googleapis/java-bigquerystorage/commit/1f2752f3988ac87dca50813d69d0d519a2356a30)) + + +### Bug Fixes + +* **bigdecimal:** Convert BigDecimal to BigNumeric instead of Numeric ([#2031](https://github.com/googleapis/java-bigquerystorage/issues/2031)) ([e0f7e34](https://github.com/googleapis/java-bigquerystorage/commit/e0f7e34ca7f7c60279985afec46f780e0acc9a9e)), closes [#2013](https://github.com/googleapis/java-bigquerystorage/issues/2013) + +## [2.33.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.33.0...v2.33.1) (2023-03-02) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.23.0 ([#2012](https://github.com/googleapis/java-bigquerystorage/issues/2012)) ([0651aa6](https://github.com/googleapis/java-bigquerystorage/commit/0651aa6f3e83da73da77ae2e9376f6203cd36338)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.4.0 ([#2024](https://github.com/googleapis/java-bigquerystorage/issues/2024)) ([9135769](https://github.com/googleapis/java-bigquerystorage/commit/91357697f61d3026ae3fb14605e9e3ee94b351d1)) +* Update dependency com.google.http-client:google-http-client to v1.43.0 ([#2018](https://github.com/googleapis/java-bigquerystorage/issues/2018)) ([6bccd9d](https://github.com/googleapis/java-bigquerystorage/commit/6bccd9d21698fa94645bfdda2e7d4e70af612d6b)) +* Update dependency 
org.json:json to v20230227 ([#2020](https://github.com/googleapis/java-bigquerystorage/issues/2020)) ([6d6bb76](https://github.com/googleapis/java-bigquerystorage/commit/6d6bb76188d4be6beec88c54946d6f9515962c55))
+
+## [2.33.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.32.1...v2.33.0) (2023-03-01)
+
+
+### Features
+
+* Add header back to the client ([#2016](https://github.com/googleapis/java-bigquerystorage/issues/2016)) ([de00447](https://github.com/googleapis/java-bigquerystorage/commit/de00447958e5939d7be9d0f7da02323aabbfed8c))
+
+
+### Bug Fixes
+
+* Add client shutdown if request waiting in request queue for too long. ([#2017](https://github.com/googleapis/java-bigquerystorage/issues/2017)) ([91da88b](https://github.com/googleapis/java-bigquerystorage/commit/91da88b0ed914bf55111dd9cef2a3fc4b27c3443))
+* Allow StreamWriter settings to override passed in BQ client setting ([#2001](https://github.com/googleapis/java-bigquerystorage/issues/2001)) ([66db8fe](https://github.com/googleapis/java-bigquerystorage/commit/66db8fed26474076fb5aaca5044d39e11f6ef28d))
+* Catch uncaught exception from append loop and add exponential retry to reconnection ([#2015](https://github.com/googleapis/java-bigquerystorage/issues/2015)) ([35db0fb](https://github.com/googleapis/java-bigquerystorage/commit/35db0fb38a929a8f3e4db30ee173ce5a4af43d64))
+* Remove write_location header pending discussion ([#2021](https://github.com/googleapis/java-bigquerystorage/issues/2021)) ([0941d43](https://github.com/googleapis/java-bigquerystorage/commit/0941d4363daf782e0be81c11fdf6a2fe0ff4d7ac))
+
+## [2.32.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.32.0...v2.32.1) (2023-02-22)
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.22.0 ([#1991](https://github.com/googleapis/java-bigquerystorage/issues/1991)) ([0684e7c](https://github.com/googleapis/java-bigquerystorage/commit/0684e7cf0f0b4d8ff343aee29f252489fe6c1ee7))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.3.0 ([#2003](https://github.com/googleapis/java-bigquerystorage/issues/2003)) ([d146bec](https://github.com/googleapis/java-bigquerystorage/commit/d146becd5bf1022a4259c027a5e9b8db262170ea))
+
+## [2.32.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.31.1...v2.32.0) (2023-02-21)
+
+
+### Features
+
+* Add default_value_expression to TableFieldSchema ([#1988](https://github.com/googleapis/java-bigquerystorage/issues/1988)) ([89c767f](https://github.com/googleapis/java-bigquerystorage/commit/89c767f059ead3ce30842a478e395ac657f4b4ef))
+* Add functions to set missing value map in the stream writers ([#1966](https://github.com/googleapis/java-bigquerystorage/issues/1966)) ([98d7e44](https://github.com/googleapis/java-bigquerystorage/commit/98d7e446b75aba02ce27cdcb5e835c3fd0f3ad54))
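For the missing value map feature above (#1966), a rough sketch of the intended configuration; the `setMissingValueInterpretationMap` method name and the `update_time` column are assumptions to be checked against the released Javadoc:

```java
import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
import com.google.common.collect.ImmutableMap;

public class MissingValueMapSketch {
  public static void main(String[] args) throws Exception {
    try (BigQueryWriteClient client = BigQueryWriteClient.create();
        JsonStreamWriter writer =
            JsonStreamWriter.newBuilder(
                    "projects/my-project/datasets/my_dataset/tables/my_table", client)
                // When "update_time" is absent from an appended row, let the
                // backend fill in the column's default value instead of NULL.
                .setMissingValueInterpretationMap(
                    ImmutableMap.of("update_time", MissingValueInterpretation.DEFAULT_VALUE))
                .build()) {
      // Append rows as usual; only the listed columns get the special treatment.
    }
  }
}
```
+
+
+### Bug Fixes
+
+* Extra logging for investigation of the stuck case.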
([#1999](https://github.com/googleapis/java-bigquerystorage/issues/1999)) ([e6ee13a](https://github.com/googleapis/java-bigquerystorage/commit/e6ee13a70304c2278d6b7a94dc19049a2b786a6e)) + +## [2.31.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.31.0...v2.31.1) (2023-02-14) + + +### Bug Fixes + +* Refactor only, add StreamWriter to AppendRowsRequestResponse ([#1981](https://github.com/googleapis/java-bigquerystorage/issues/1981)) ([da06a46](https://github.com/googleapis/java-bigquerystorage/commit/da06a4623e89b5f3caf90f85dd87d8538fc7d312)) +* Support ByteString values on repeated fields ([#1996](https://github.com/googleapis/java-bigquerystorage/issues/1996)) ([0263b00](https://github.com/googleapis/java-bigquerystorage/commit/0263b0054fbb7d9f49157f507cfe11c5b5816b07)) + +## [2.31.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.30.0...v2.31.0) (2023-02-08) + + +### Features + +* Add isDone to JsonWriter to indicate a JsonWriter is no longer usable and needs to be recreated. ([#1978](https://github.com/googleapis/java-bigquerystorage/issues/1978)) ([dc07ed8](https://github.com/googleapis/java-bigquerystorage/commit/dc07ed84778028f149cab3883af9bb9a7929efd1)) +* Add isUserClosed to indicate that user explicitly closed the StreamWriter ([#1983](https://github.com/googleapis/java-bigquerystorage/issues/1983)) ([abd6627](https://github.com/googleapis/java-bigquerystorage/commit/abd66274abca2315739c1b388dd363e107baad85)) + + +### Dependencies + +* Update arrow.version to v11 (major) ([#1961](https://github.com/googleapis/java-bigquerystorage/issues/1961)) ([c13ab23](https://github.com/googleapis/java-bigquerystorage/commit/c13ab23a96d72d5087653ebaf0a0635863a526a9)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.21.0 ([#1956](https://github.com/googleapis/java-bigquerystorage/issues/1956)) ([6a1d783](https://github.com/googleapis/java-bigquerystorage/commit/6a1d783b80bdd0e1846ee667e2a15d41e3f6980f)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.2.0 ([#1974](https://github.com/googleapis/java-bigquerystorage/issues/1974)) ([8e2c1e3](https://github.com/googleapis/java-bigquerystorage/commit/8e2c1e326d6a65ba814ef826cfcb98c58395f01f)) +* Update dependency org.apache.arrow:arrow-memory-netty to v11 ([#1962](https://github.com/googleapis/java-bigquerystorage/issues/1962)) ([48abc75](https://github.com/googleapis/java-bigquerystorage/commit/48abc75cdd2c372751790c197b83cf3aa59f1055)) +* Update dependency org.apache.arrow:arrow-vector to v11 ([#1963](https://github.com/googleapis/java-bigquerystorage/issues/1963)) ([e872723](https://github.com/googleapis/java-bigquerystorage/commit/e87272323a0267771f3ffbc857fbaf06c6656f90)) + +## [2.30.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.29.0...v2.30.0) (2023-02-06) + + +### Features + +* Add userClose flag back to StreamWriter ([#1973](https://github.com/googleapis/java-bigquerystorage/issues/1973)) ([4b51acd](https://github.com/googleapis/java-bigquerystorage/commit/4b51acd49292d99e1c3cd670bec695b8bcd3b5c4)) + + +### Bug Fixes + +* Close stream connection before each retry ([#1975](https://github.com/googleapis/java-bigquerystorage/issues/1975)) ([54e9bb9](https://github.com/googleapis/java-bigquerystorage/commit/54e9bb9116eb9781b6f2266f98de4d3853a469b8)) +* Improve ConnectionWorker fine logging ([#1972](https://github.com/googleapis/java-bigquerystorage/issues/1972)) 
([812bcf1](https://github.com/googleapis/java-bigquerystorage/commit/812bcf16efdc7a1797435f0b87430c35f27f7245))
+
+## [2.29.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.28.4...v2.29.0) (2023-02-01)
+
+
+### Features
+
+* Add timeout to inflight queue waiting ([#1957](https://github.com/googleapis/java-bigquerystorage/issues/1957)) ([3159b12](https://github.com/googleapis/java-bigquerystorage/commit/3159b120e5cd388cf9776a1fa928a3e6ae105d9d))
+* Allow java client to handle schema change during same stream name ([#1964](https://github.com/googleapis/java-bigquerystorage/issues/1964)) ([305f71e](https://github.com/googleapis/java-bigquerystorage/commit/305f71ee4b274df58388fc3000e9f5da9fc908e1))
+
+
+### Bug Fixes
+
+* At connection level, retry for internal errors ([#1965](https://github.com/googleapis/java-bigquerystorage/issues/1965)) ([9c01bc1](https://github.com/googleapis/java-bigquerystorage/commit/9c01bc11b51dc1e3e209e4d6b666b9ddd3212cf5))
+* Reduce visibility of the ConnectionPool and ConnectionWorker, so… ([#1954](https://github.com/googleapis/java-bigquerystorage/issues/1954)) ([dcb234b](https://github.com/googleapis/java-bigquerystorage/commit/dcb234b95d0812d4d91b0c206d0b7e0fb30ab0fa))
+* Remove unrecoverable connection from connection pool during multiplexing ([#1967](https://github.com/googleapis/java-bigquerystorage/issues/1967)) ([091dddb](https://github.com/googleapis/java-bigquerystorage/commit/091dddb9b2baf1f4b481e8d7961d451b71a8508b))
+
+## [2.28.4](https://github.com/googleapis/java-bigquerystorage/compare/v2.28.3...v2.28.4) (2023-01-25)
+
+
+### Bug Fixes
+
+* Fix one potential root cause of deadlock in connection worker ([#1955](https://github.com/googleapis/java-bigquerystorage/issues/1955)) ([598ce5e](https://github.com/googleapis/java-bigquerystorage/commit/598ce5eb5e8d5efaaa841495794be4d39c6c0fce))
+
+## [2.28.3](https://github.com/googleapis/java-bigquerystorage/compare/v2.28.2...v2.28.3) (2023-01-20)
+
+
+### Bug Fixes
+
+* Fix deadlock issue in ConnectionWorkerPool ([#1938](https://github.com/googleapis/java-bigquerystorage/issues/1938)) ([caf1e76](https://github.com/googleapis/java-bigquerystorage/commit/caf1e7603153b1b8de90d6294ac15c711076d8f4))
+* **java:** Skip fixing poms for special modules ([#1744](https://github.com/googleapis/java-bigquerystorage/issues/1744)) ([#1946](https://github.com/googleapis/java-bigquerystorage/issues/1946)) ([2863542](https://github.com/googleapis/java-bigquerystorage/commit/286354231eaf8e329bbea05e0c45b28e14f4a1e7))
+* Update BQTableSchemaToProtoDescriptor to unblock a protobuf change. ([#1942](https://github.com/googleapis/java-bigquerystorage/issues/1942)) ([62cc80a](https://github.com/googleapis/java-bigquerystorage/commit/62cc80a216ff4969a30e5fe93ff74b2c582e4677))
+* We should isolate the client used in StreamWriter and the client used in ConnectionWorker ([#1933](https://github.com/googleapis/java-bigquerystorage/issues/1933)) ([3530672](https://github.com/googleapis/java-bigquerystorage/commit/3530672f0bddfacb973fb0fc1d30aabb4ffefccb))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.2 ([#1949](https://github.com/googleapis/java-bigquerystorage/issues/1949)) ([23ec7fa](https://github.com/googleapis/java-bigquerystorage/commit/23ec7fa2c70c720fb995bdfac9416a29d77de2e2))
+
+## [2.28.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.28.1...v2.28.2) (2023-01-18)
+
+
+### Bug Fixes
+
+* Add a timeout on retry for retryable errors ([#1930](https://github.com/googleapis/java-bigquerystorage/issues/1930)) ([2d648cf](https://github.com/googleapis/java-bigquerystorage/commit/2d648cf9706a6e7bc155e8769ba7dda2a6bc3061))
+* Add precision overwrite to 9 digits if the passed in JSON type is FLOAT or DOUBLE ([#1932](https://github.com/googleapis/java-bigquerystorage/issues/1932)) ([417bc6c](https://github.com/googleapis/java-bigquerystorage/commit/417bc6c76f7b9fa602721c3c183c487c5aab2e09))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.20.2 ([#1929](https://github.com/googleapis/java-bigquerystorage/issues/1929)) ([a95ae9d](https://github.com/googleapis/java-bigquerystorage/commit/a95ae9d708bd3e2d8f55297a6004a0d937c8d83f))
+
+## [2.28.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.28.0...v2.28.1) (2023-01-12)
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.20.1 ([#1919](https://github.com/googleapis/java-bigquerystorage/issues/1919)) ([b6565f2](https://github.com/googleapis/java-bigquerystorage/commit/b6565f2ee0d3b45f55bc3ade6918d36cf0bd20da))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.1 ([#1922](https://github.com/googleapis/java-bigquerystorage/issues/1922)) ([074a20e](https://github.com/googleapis/java-bigquerystorage/commit/074a20e0a9baf228e34fa01511a980862650c66a))
+
+## [2.28.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.27.0...v2.28.0) (2023-01-04)
+
+
+### Features
+
+* Add estimated number of rows to CreateReadSession response ([#1913](https://github.com/googleapis/java-bigquerystorage/issues/1913)) ([4840b26](https://github.com/googleapis/java-bigquerystorage/commit/4840b26956c22e40b6edcefe57f26dd0386e90e5))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.20.0 ([#1912](https://github.com/googleapis/java-bigquerystorage/issues/1912)) ([e9e7ac3](https://github.com/googleapis/java-bigquerystorage/commit/e9e7ac3d4e655f7b77d830108226891c45464069))
+* Update dependency org.json:json to v20220924 ([#1799](https://github.com/googleapis/java-bigquerystorage/issues/1799)) ([a0a5d52](https://github.com/googleapis/java-bigquerystorage/commit/a0a5d52cdd06739992944126a89fe58daf4ee605))
+
+## [2.27.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.26.0...v2.27.0) (2022-12-12)
+
+
+### Features
+
+* Change one thread per retry to use a thread pool ([#1898](https://github.com/googleapis/java-bigquerystorage/issues/1898)) ([44a4e4d](https://github.com/googleapis/java-bigquerystorage/commit/44a4e4d8c70bf910f96d2d2c181d1f23b70f3e52))
+* Throw error when using connection pool for explicit stream ([#1903](https://github.com/googleapis/java-bigquerystorage/issues/1903)) ([bd89556](https://github.com/googleapis/java-bigquerystorage/commit/bd895567fe33735294065d7043d845f14f33f8a8))
+
+
+### Bug Fixes
+
+* Add back the accidentally removed background executor provider ([#1899](https://github.com/googleapis/java-bigquerystorage/issues/1899)) ([065cc4f](https://github.com/googleapis/java-bigquerystorage/commit/065cc4f35ad5e5cdc393e01adbea881c6e92dcab))
+* Update JsonStreamWriterBuilder comment and update sample to use the latest schema retrieval support ([#1902](https://github.com/googleapis/java-bigquerystorage/issues/1902)) ([2a46ec3](https://github.com/googleapis/java-bigquerystorage/commit/2a46ec3df419bbeba6a2e60b2b621dc60f31eab1))
+
+
+### Dependencies
+
+* Update arrow.version to v10.0.1 ([#1894](https://github.com/googleapis/java-bigquerystorage/issues/1894)) ([8e90767](https://github.com/googleapis/java-bigquerystorage/commit/8e907677bae0c48e8c64be5cbab8fb203645f4ef))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.0 ([#1905](https://github.com/googleapis/java-bigquerystorage/issues/1905)) ([767be03](https://github.com/googleapis/java-bigquerystorage/commit/767be035defbcc5b77f1276bb1a2983b6a7423bd))
+* Update dependency org.apache.arrow:arrow-memory-netty to v10.0.1 ([#1895](https://github.com/googleapis/java-bigquerystorage/issues/1895)) ([8b079cc](https://github.com/googleapis/java-bigquerystorage/commit/8b079cc47ca914756bd73dda98bd15393754ebb1))
+* Update dependency org.apache.arrow:arrow-vector to v10.0.1 ([#1896](https://github.com/googleapis/java-bigquerystorage/issues/1896)) ([087ac6e](https://github.com/googleapis/java-bigquerystorage/commit/087ac6e7ad813dc248e9029f91c454299a87b2b3))
+
+## [2.26.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.25.0...v2.26.0) (2022-11-18)
+
+
+### Features
+
+* Add missing_value_interpretations to AppendRowsRequest ([#1885](https://github.com/googleapis/java-bigquerystorage/issues/1885)) ([0def62f](https://github.com/googleapis/java-bigquerystorage/commit/0def62fb6375d35e753f60dc2ac3b102e15ef336))
+* Add update schema support for multiplexing ([#1867](https://github.com/googleapis/java-bigquerystorage/issues/1867)) ([2adf81b](https://github.com/googleapis/java-bigquerystorage/commit/2adf81b70da043c6a66e38dbd52ef7aae9cffb9e))
+* Fix windows build failure by using nanoSeconds instead of Instant for better accuracy. ([#1887](https://github.com/googleapis/java-bigquerystorage/issues/1887)) ([e5cd7df](https://github.com/googleapis/java-bigquerystorage/commit/e5cd7df54e2f6af12c240268a91e0afc2ec27a8a))
+* Next release from main branch is 2.26.0 ([#1874](https://github.com/googleapis/java-bigquerystorage/issues/1874)) ([589dd63](https://github.com/googleapis/java-bigquerystorage/commit/589dd637dfc16fb9bd3655a0d98115f601983997))
+* Provide sample code for row-level error handling ([#1863](https://github.com/googleapis/java-bigquerystorage/issues/1863)) ([027c5bb](https://github.com/googleapis/java-bigquerystorage/commit/027c5bb7cc27234bc1e3a552b4896b919921dc71))
+
+
+### Bug Fixes
+
+* Fix windows build bug caused by Instant resolution. ([#1884](https://github.com/googleapis/java-bigquerystorage/issues/1884)) ([2332dc1](https://github.com/googleapis/java-bigquerystorage/commit/2332dc13bb45c7377722a3a289d66a4cf73bb79d))
+
+
+### Dependencies
+
+* Update dependency com.google.auto.value:auto-value to v1.10.1 ([#1888](https://github.com/googleapis/java-bigquerystorage/issues/1888)) ([9546298](https://github.com/googleapis/java-bigquerystorage/commit/954629811d0b9e942f23e0cb0d1935cd38e4b30b))
+* Update dependency com.google.auto.value:auto-value-annotations to v1.10.1 ([#1889](https://github.com/googleapis/java-bigquerystorage/issues/1889)) ([b4eec03](https://github.com/googleapis/java-bigquerystorage/commit/b4eec032706f2ed7039b3ad9bfd81b8dea536008))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.19.1 ([#1878](https://github.com/googleapis/java-bigquerystorage/issues/1878)) ([4b1989b](https://github.com/googleapis/java-bigquerystorage/commit/4b1989b51d18b081c8e980a59bd6117230c11a9d))
+
+## [2.25.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.24.2...v2.25.0) (2022-11-08)
+
+
+### Features
+
+* Add schema comparison to the main request loop for multiplexing to correctly update schema ([#1865](https://github.com/googleapis/java-bigquerystorage/issues/1865)) ([cb18d28](https://github.com/googleapis/java-bigquerystorage/commit/cb18d288f78773af60b9fddc583f46a571cbc3f4))
+
+
+### Documentation
+
+* Remove stale header guidance for AppendRows ([#1866](https://github.com/googleapis/java-bigquerystorage/issues/1866)) ([1de23be](https://github.com/googleapis/java-bigquerystorage/commit/1de23be7fe84747a4e7ca5b511458a5b378a1170))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.18.2 ([#1862](https://github.com/googleapis/java-bigquerystorage/issues/1862)) ([ca2cafe](https://github.com/googleapis/java-bigquerystorage/commit/ca2cafe095eca8b610ec8ca1acf95cf7a561af61))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.19.0 ([#1871](https://github.com/googleapis/java-bigquerystorage/issues/1871)) ([4aa967a](https://github.com/googleapis/java-bigquerystorage/commit/4aa967a0a30db0037a04b6b47646ae79dfffaead))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.6 ([#1870](https://github.com/googleapis/java-bigquerystorage/issues/1870)) ([17b2a80](https://github.com/googleapis/java-bigquerystorage/commit/17b2a8036cf5706cb753f7116b1fa424a661ba39))
+* Update dependency kr.motd.maven:os-maven-plugin to v1.7.1 ([#1868](https://github.com/googleapis/java-bigquerystorage/issues/1868)) ([08b8468](https://github.com/googleapis/java-bigquerystorage/commit/08b8468dc31fe654f5f7a345377b14f51f19eeea))
+
+## [2.24.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.24.1...v2.24.2) (2022-10-28)
+
+
+### Bug Fixes
+
+* Remove applying header for multiplexing client and add a unit test for multiplexing with different location ([df9b487](https://github.com/googleapis/java-bigquerystorage/commit/df9b4876f868d1e33a1c2273a83fc7bfcd3ddf27))
+
+
+### Dependencies
+
+* Revert dependency upgrade for protobuf to v3.20.2 ([#1659](https://github.com/googleapis/java-bigquerystorage/issues/1659)) ([#1856](https://github.com/googleapis/java-bigquerystorage/issues/1856)) ([df9b487](https://github.com/googleapis/java-bigquerystorage/commit/df9b4876f868d1e33a1c2273a83fc7bfcd3ddf27))
+* Update arrow.version to v10 (major) (https://togithub.com/googleapis/java-bigquerystorage/issues/1847) ([b0b5f06](https://togithub.com/googleapis/java-bigquerystorage/commit/b0b5f06de60bce556b813e91c7ac665d8f045906)) ([df9b487](https://github.com/googleapis/java-bigquerystorage/commit/df9b4876f868d1e33a1c2273a83fc7bfcd3ddf27))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.18.0 (https://togithub.com/googleapis/java-bigquerystorage/issues/1852) ([bb475bd](https://togithub.com/googleapis/java-bigquerystorage/commit/bb475bdbeb0749889fec32fcecf36d0b51fdd8ac)) ([df9b487](https://github.com/googleapis/java-bigquerystorage/commit/df9b4876f868d1e33a1c2273a83fc7bfcd3ddf27))
+* Update dependency org.apache.arrow:arrow-memory-netty to v10 (https://togithub.com/googleapis/java-bigquerystorage/issues/1848) ([d6d96ff](https://togithub.com/googleapis/java-bigquerystorage/commit/d6d96ffb58be65d2a06c4f02de351fadd08da8e4)) ([df9b487](https://github.com/googleapis/java-bigquerystorage/commit/df9b4876f868d1e33a1c2273a83fc7bfcd3ddf27))
+* Update dependency org.apache.arrow:arrow-vector to v10 (https://togithub.com/googleapis/java-bigquerystorage/issues/1849) ([66853c2](https://togithub.com/googleapis/java-bigquerystorage/commit/66853c216ed2887e2d56f987cfffcb10d616f4bb)) ([df9b487](https://github.com/googleapis/java-bigquerystorage/commit/df9b4876f868d1e33a1c2273a83fc7bfcd3ddf27))
+
+## [2.24.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.24.0...v2.24.1) (2022-10-28)
+
+
+### Bug Fixes
+
+* Remove applying header for multiplexing client and add a unit test for multiplexing with different location ([#1850](https://github.com/googleapis/java-bigquerystorage/issues/1850)) ([1733d5a](https://github.com/googleapis/java-bigquerystorage/commit/1733d5a380080a0826aa9fcc41672bdb6a64c249))
+
+
+### Dependencies
+
+* Update arrow.version to v10 (major) ([#1847](https://github.com/googleapis/java-bigquerystorage/issues/1847)) ([b0b5f06](https://github.com/googleapis/java-bigquerystorage/commit/b0b5f06de60bce556b813e91c7ac665d8f045906))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.18.0 ([#1852](https://github.com/googleapis/java-bigquerystorage/issues/1852)) ([bb475bd](https://github.com/googleapis/java-bigquerystorage/commit/bb475bdbeb0749889fec32fcecf36d0b51fdd8ac))
+* Update dependency org.apache.arrow:arrow-memory-netty to v10 ([#1848](https://github.com/googleapis/java-bigquerystorage/issues/1848)) ([d6d96ff](https://github.com/googleapis/java-bigquerystorage/commit/d6d96ffb58be65d2a06c4f02de351fadd08da8e4))
+* Update dependency org.apache.arrow:arrow-vector to v10 ([#1849](https://github.com/googleapis/java-bigquerystorage/issues/1849)) ([66853c2](https://github.com/googleapis/java-bigquerystorage/commit/66853c216ed2887e2d56f987cfffcb10d616f4bb))
+
+## [2.24.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.23.1...v2.24.0) (2022-10-25)
+
+
+### Features
+
+* Add getInflightWaitSeconds implementation ([#1835](https://github.com/googleapis/java-bigquerystorage/issues/1835)) ([b569116](https://github.com/googleapis/java-bigquerystorage/commit/b569116179700123e405eb7712abb136456f2f26))
+* **shortsAndBigDecimals:** Add shorts and big decimals ([#1674](https://github.com/googleapis/java-bigquerystorage/issues/1674)) ([604d7df](https://github.com/googleapis/java-bigquerystorage/commit/604d7df2afa54e445a4d310e5293d8d19901d565))
+
+
+### Bug Fixes
+
+* **java:** Restore native image configurations ([#1844](https://github.com/googleapis/java-bigquerystorage/issues/1844)) ([8ce670a](https://github.com/googleapis/java-bigquerystorage/commit/8ce670a487a192807ccfd58613fc271d9402ca7c))
+* Remove the client lib header setting since after router migration, it is no longer needed ([#1842](https://github.com/googleapis/java-bigquerystorage/issues/1842)) ([5f3b821](https://github.com/googleapis/java-bigquerystorage/commit/5f3b8214116b17b315d589bfde184e0e045cff69))
+
+
+### Dependencies
+
+* Update dependency com.google.auto.value:auto-value to v1.10 ([#1825](https://github.com/googleapis/java-bigquerystorage/issues/1825)) ([f7b8f2b](https://github.com/googleapis/java-bigquerystorage/commit/f7b8f2b4df8ca9b306a75d46eb223124ac9bdebb))
+* Update dependency com.google.auto.value:auto-value-annotations to v1.10 ([#1826](https://github.com/googleapis/java-bigquerystorage/issues/1826)) ([37eb8a1](https://github.com/googleapis/java-bigquerystorage/commit/37eb8a1df34e8ae4d5e2849c76d9a591ec5505d4))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.17.1 ([#1829](https://github.com/googleapis/java-bigquerystorage/issues/1829)) ([7e8d900](https://github.com/googleapis/java-bigquerystorage/commit/7e8d90037f5ae426882f90ab251d315767b3a6b7))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.5 ([#1840](https://github.com/googleapis/java-bigquerystorage/issues/1840)) ([474756b](https://github.com/googleapis/java-bigquerystorage/commit/474756bd6547254ed6d761a73f2e69920fa79458))
+
+## [2.23.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.23.0...v2.23.1) (2022-10-04)
+
+
+### Bug Fixes
+
+* update protobuf to v3.21.7 ([77bf65b](https://github.com/googleapis/java-bigquerystorage/commit/77bf65b94e324712ff957cb709d393c1f825ebcf))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.17.0 ([#1821](https://github.com/googleapis/java-bigquerystorage/issues/1821)) ([8747079](https://github.com/googleapis/java-bigquerystorage/commit/87470799a55b60d671af977dca14140e9f6d94c6))
+
+## [2.23.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.22.0...v2.23.0) (2022-10-03)
+
+
+### Features
+
+* return list of row errors from append ([df7c6e9](https://github.com/googleapis/java-bigquerystorage/commit/df7c6e9816d43eac196d77604bc99db6115de670))
+
+
+### Bug Fixes
+
+* return row-level error information via an AppendSerializtionError exception ([df7c6e9](https://github.com/googleapis/java-bigquerystorage/commit/df7c6e9816d43eac196d77604bc99db6115de670))
+
+## [2.22.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.21.0...v2.22.0) (2022-09-29)
+
+
+### Features
+
+* Add a new specific exception for json data that has an unknown field ([#1792](https://github.com/googleapis/java-bigquerystorage/issues/1792)) ([18f93c1](https://github.com/googleapis/java-bigquerystorage/commit/18f93c124334464a951b3d3065bdf11bbda13dac))
+* Add cache for location in stream writer, and trigger that when location is not present ([#1804](https://github.com/googleapis/java-bigquerystorage/issues/1804)) ([c24c14f](https://github.com/googleapis/java-bigquerystorage/commit/c24c14f14b1a6f8249f55630d51e3cadd500c1ad))
+* Add close() to multiplexing client ([#1788](https://github.com/googleapis/java-bigquerystorage/issues/1788)) ([6b3a974](https://github.com/googleapis/java-bigquerystorage/commit/6b3a97491bf718333ae7c085f9b10723d8b24388))
+* Add fully managed schema support on json writer ([#1794](https://github.com/googleapis/java-bigquerystorage/issues/1794)) ([b6b515f](https://github.com/googleapis/java-bigquerystorage/commit/b6b515f57a0f6956c9d9f902a5e3e16edc845a48))
+* Add more retry error code to the sample ([#1805](https://github.com/googleapis/java-bigquerystorage/issues/1805)) ([4bf67bc](https://github.com/googleapis/java-bigquerystorage/commit/4bf67bcf0de2d370f0b04d8df236d30466b91598))
+* Add multiplexing client core algorithm and basic testing, plus fix a tiny bug in fake server ([#1787](https://github.com/googleapis/java-bigquerystorage/issues/1787)) ([1bb8e26](https://github.com/googleapis/java-bigquerystorage/commit/1bb8e262941f570d7f2de60123ec5a6a0cf43600))
+* Add multiplexing support to connection worker. ([#1784](https://github.com/googleapis/java-bigquerystorage/issues/1784)) ([a869a1d](https://github.com/googleapis/java-bigquerystorage/commit/a869a1d8baba3cc0f6046d661c6f52ec12a3f12d))
+* Add support for flexible column name in JsonStreamWriter ([#1786](https://github.com/googleapis/java-bigquerystorage/issues/1786)) ([694abbb](https://github.com/googleapis/java-bigquerystorage/commit/694abbb43bf2970cd81803521c349dc31a38f484))
+* Add two fine logs that would allow Datastream to look into the stuck issue ([#1791](https://github.com/googleapis/java-bigquerystorage/issues/1791)) ([745ceb4](https://github.com/googleapis/java-bigquerystorage/commit/745ceb46dec5922efe394773028532dcd84a4f9b))
+* Always pass a null bigquery client lib to StreamWriter ([#1795](https://github.com/googleapis/java-bigquerystorage/issues/1795)) ([eec50c1](https://github.com/googleapis/java-bigquerystorage/commit/eec50c14e1dff84ae9a3e70f9d08d27b9e225e55))
+* Bug fix for checking write_stream field but proto might not contain the field to unblock the release ([#1806](https://github.com/googleapis/java-bigquerystorage/issues/1806)) ([9791d69](https://github.com/googleapis/java-bigquerystorage/commit/9791d693c75367bec6451ebf65ae4ea3347bf50f))
+* Client unknown fields drives writer refreshment ([#1797](https://github.com/googleapis/java-bigquerystorage/issues/1797)) ([d8aaed5](https://github.com/googleapis/java-bigquerystorage/commit/d8aaed522b9de487539165ea662465e5a96222f1))
+* ExecutorProvider can now be replaced ([#1770](https://github.com/googleapis/java-bigquerystorage/issues/1770)) ([6380f71](https://github.com/googleapis/java-bigquerystorage/commit/6380f713b8f42fa3a58df2750ea2cf3b7397d29c)), closes [#1769](https://github.com/googleapis/java-bigquerystorage/issues/1769)
+* Fix some todos and reject stream writer if it's created with mixed behavior of passed in client or not ([#1803](https://github.com/googleapis/java-bigquerystorage/issues/1803)) ([1a69192](https://github.com/googleapis/java-bigquerystorage/commit/1a69192e2ffc6475a7e4b67c5a452f1c0e8aaddc))
+* Minor tune after offline testing ([#1807](https://github.com/googleapis/java-bigquerystorage/issues/1807)) ([694a870](https://github.com/googleapis/java-bigquerystorage/commit/694a870bac623ef038168a4358b9d73972077edb))
+* Populate location info if we already called GetWriteStream ([#1802](https://github.com/googleapis/java-bigquerystorage/issues/1802)) ([5f43103](https://github.com/googleapis/java-bigquerystorage/commit/5f4310321e7f90385f7ef5c32e3e5395f719d0ca))
+* Some fixes for multiplexing client ([#1798](https://github.com/googleapis/java-bigquerystorage/issues/1798)) ([b3ffd77](https://github.com/googleapis/java-bigquerystorage/commit/b3ffd77e4b86708f241ba517c55bb3508964bc0e))
+* Wire connection pool to stream writer without implementing updated schema ([#1790](https://github.com/googleapis/java-bigquerystorage/issues/1790)) ([3eb1475](https://github.com/googleapis/java-bigquerystorage/commit/3eb147545db2415e5a68752b8ede1c4d342d1a84))
+
+## [2.21.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.20.1...v2.21.0) (2022-09-15)
+
+
+### Features
+
+* Add connection worker skeleton used for multiplexing client ([#1778](https://github.com/googleapis/java-bigquerystorage/issues/1778)) ([b26265e](https://github.com/googleapis/java-bigquerystorage/commit/b26265e56a47cda3fca1329d349ab4a18288d4cb))
+* Add Load api for connection worker for multiplexing worker ([#1779](https://github.com/googleapis/java-bigquerystorage/issues/1779)) ([179930e](https://github.com/googleapis/java-bigquerystorage/commit/179930e6b0017f1fa12dcd3ef7c3efa374f41576))
+* Add location to WriteStream and add WriteStreamView support ([#1771](https://github.com/googleapis/java-bigquerystorage/issues/1771)) ([f446ff4](https://github.com/googleapis/java-bigquerystorage/commit/f446ff4f7d3f9d217501201289be416cbfdbe937))
+* Add proto annotation for non-ascii field mapping ([#1776](https://github.com/googleapis/java-bigquerystorage/issues/1776)) ([1a079ee](https://github.com/googleapis/java-bigquerystorage/commit/1a079ee0761bad2e942081f7f02b530f32e55afe))
+* introducing connection worker to run a loop for fetching / sending requests in a queue. ([7dd447d](https://github.com/googleapis/java-bigquerystorage/commit/7dd447da206cdd7a403191bd15203bb4cc53ccb5))
+
+
+### Bug Fixes
+
+* **api:** Numeric/bignumeric conversion issue [#1757](https://github.com/googleapis/java-bigquerystorage/issues/1757) ([#1768](https://github.com/googleapis/java-bigquerystorage/issues/1768)) ([5cc96a0](https://github.com/googleapis/java-bigquerystorage/commit/5cc96a0c5da5c8b557e96cd1a25afdff9d583a0f))
+
+
+### Dependencies
+
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.15.0 ([#1766](https://github.com/googleapis/java-bigquerystorage/issues/1766)) ([40e0ba0](https://github.com/googleapis/java-bigquerystorage/commit/40e0ba09527c564004fcbbb0c3af2fb97eb811c0))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.16.0 ([#1777](https://github.com/googleapis/java-bigquerystorage/issues/1777)) ([08a7c8f](https://github.com/googleapis/java-bigquerystorage/commit/08a7c8f1dde71f48706bb0b1aa780f79012594bf))
+* Update dependency com.google.cloud:google-cloud-bigquery to v2.16.1 ([#1781](https://github.com/googleapis/java-bigquerystorage/issues/1781)) ([7ff7099](https://github.com/googleapis/java-bigquerystorage/commit/7ff709951f40c3683da628a9bcf66a5fe0a2e368))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.2 ([#1772](https://github.com/googleapis/java-bigquerystorage/issues/1772)) ([3279ef4](https://github.com/googleapis/java-bigquerystorage/commit/3279ef42e915881c255a8db8e677799f530c5d53))
+* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.3 ([#1780](https://github.com/googleapis/java-bigquerystorage/issues/1780)) ([ed2cd66](https://github.com/googleapis/java-bigquerystorage/commit/ed2cd66a3804140711b9f4aa05b67641f2494bc9))
+
+## [2.20.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.20.0...v2.20.1) (2022-08-24)
+
+
+### Bug Fixes
+
+* Add unit test for closing disconnected streamwriter. Also reduce wait from 5->3 minutes ([#1751](https://github.com/googleapis/java-bigquerystorage/issues/1751)) ([095d7d5](https://github.com/googleapis/java-bigquerystorage/commit/095d7d59b47becf72a5e9cd3ced383fa45b04b50))
+* allow repeated field to have a null or missing json array ([#1760](https://github.com/googleapis/java-bigquerystorage/issues/1760)) ([ef24825](https://github.com/googleapis/java-bigquerystorage/commit/ef24825ca40e6156f6f3ce38c3c6051673a5f6cc))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.7 ([#1759](https://github.com/googleapis/java-bigquerystorage/issues/1759)) ([ac3f0b8](https://github.com/googleapis/java-bigquerystorage/commit/ac3f0b871a1db4bcbd5ce8fa019ae8e1559ed3ec))
+
+## [2.20.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.19.1...v2.20.0) (2022-08-17)
+
+
+### Features
+
+* introduce InflightLimitExceededException ([#1746](https://github.com/googleapis/java-bigquerystorage/issues/1746)) ([449353b](https://github.com/googleapis/java-bigquerystorage/commit/449353bd3b1fc78d46990c8203e1607d83f7ad03))
+
+
+### Bug Fixes
+
+* Add documentation to Exceptions ([#1745](https://github.com/googleapis/java-bigquerystorage/issues/1745)) ([3bc7aca](https://github.com/googleapis/java-bigquerystorage/commit/3bc7acaa4a110a0ce487e3976b5fd4ddb865ea24))
+* Add missing field error to row error message ([#1752](https://github.com/googleapis/java-bigquerystorage/issues/1752)) ([186d213](https://github.com/googleapis/java-bigquerystorage/commit/186d2135cb7e18410b89c93e909b03dc8e92ac6c))
+* Close based on whether connection is active, not status variable ([#1750](https://github.com/googleapis/java-bigquerystorage/issues/1750)) ([ce7f3e0](https://github.com/googleapis/java-bigquerystorage/commit/ce7f3e04046be6b4c1321be877034f5d0ab007f4))
+
+
+### Dependencies
+
+* fixing the test scope dependency to runtime ([#1742](https://github.com/googleapis/java-bigquerystorage/issues/1742)) ([878020b](https://github.com/googleapis/java-bigquerystorage/commit/878020b3667040aef1b8d610b37cd678ef932870))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.4 ([#1740](https://github.com/googleapis/java-bigquerystorage/issues/1740)) ([f842d51](https://github.com/googleapis/java-bigquerystorage/commit/f842d517b70847b8e67359257a213129586513f6))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.6 ([#1747](https://github.com/googleapis/java-bigquerystorage/issues/1747)) ([329bd9d](https://github.com/googleapis/java-bigquerystorage/commit/329bd9da268f48a4d18158845895b07a43c766ab))
+
+## [2.19.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.19.0...v2.19.1) (2022-08-06)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.3 ([#1736](https://github.com/googleapis/java-bigquerystorage/issues/1736)) ([f36d4f0](https://github.com/googleapis/java-bigquerystorage/commit/f36d4f01b5529ca93ca5f713852bc8bd48c51cdc))
+
+## [2.19.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.18.0...v2.19.0) (2022-08-05)
+
+
+### Features
+
+* Add CANCELLED to StreamWriter retryable error code ([#1725](https://github.com/googleapis/java-bigquerystorage/issues/1725)) ([6d4c004](https://github.com/googleapis/java-bigquerystorage/commit/6d4c00474a4056d72544b5a8966de6acaf0eecd3))
+* remove a dummy WriterClosedException ([0b3018d](https://github.com/googleapis/java-bigquerystorage/commit/0b3018d9e4cd64402f8d111aa9b4857224b8134e))
+
+
+### Bug Fixes
+
+* Switch integration test onto v1 write api ([#1731](https://github.com/googleapis/java-bigquerystorage/issues/1731)) ([2ccb96e](https://github.com/googleapis/java-bigquerystorage/commit/2ccb96ecd7431a53ccd3a1b2bfd7df5a8e76498e))
+
+
+### Documentation
+
+* **owlbot-java:** explaining why not using formatter in pom.xml ([#1511](https://github.com/googleapis/java-bigquerystorage/issues/1511)) ([#1723](https://github.com/googleapis/java-bigquerystorage/issues/1723)) ([eabfa93](https://github.com/googleapis/java-bigquerystorage/commit/eabfa937fbda2244e67765aa6a01399ae04b943a)), closes [#1502](https://github.com/googleapis/java-bigquerystorage/issues/1502)
+
+
+### Dependencies
+
+* update arrow.version to v9 (major) ([#1728](https://github.com/googleapis/java-bigquerystorage/issues/1728)) ([3694243](https://github.com/googleapis/java-bigquerystorage/commit/3694243b5f9e392b475c2a9fbb66979ff510781f))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.1 ([#1721](https://github.com/googleapis/java-bigquerystorage/issues/1721)) ([e27a9e9](https://github.com/googleapis/java-bigquerystorage/commit/e27a9e97ac2bf1e7a40bed17c67b3244f03c7d7a))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.2 ([#1734](https://github.com/googleapis/java-bigquerystorage/issues/1734)) ([0115e98](https://github.com/googleapis/java-bigquerystorage/commit/0115e98f8c11694183741c352ce97e0ab45894c7))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v3 ([#1718](https://github.com/googleapis/java-bigquerystorage/issues/1718)) ([940c613](https://github.com/googleapis/java-bigquerystorage/commit/940c613b1480948c3561df6e19a650b14dbf051f))
+* update dependency org.apache.arrow:arrow-memory-netty to v9 ([#1729](https://github.com/googleapis/java-bigquerystorage/issues/1729)) ([297ee99](https://github.com/googleapis/java-bigquerystorage/commit/297ee998f8f868480477dabff0e0a4b3417d7129))
+* update dependency org.apache.arrow:arrow-vector to v9 ([#1730](https://github.com/googleapis/java-bigquerystorage/issues/1730)) ([6010b42](https://github.com/googleapis/java-bigquerystorage/commit/6010b42c6d14f15681ee97a83c64136aefb75028))
+* update dependency org.apache.avro:avro to v1.11.1 ([#1719](https://github.com/googleapis/java-bigquerystorage/issues/1719)) ([754296f](https://github.com/googleapis/java-bigquerystorage/commit/754296fd24635e20edccb7371b27474f825741ba))
+
+## [2.18.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.17.0...v2.18.0) (2022-07-27)
+
+
+### Features
+
+* Add another StreamWriterClosedException and remove RETRY_THRESHOLD ([#1713](https://github.com/googleapis/java-bigquerystorage/issues/1713)) ([f8d1bd9](https://github.com/googleapis/java-bigquerystorage/commit/f8d1bd901232a61ca87c2671e478136dfd4f2432))
+* increase timeout waiting for done callback from 2 minutes to 5 … ([#1682](https://github.com/googleapis/java-bigquerystorage/issues/1682)) ([5171898](https://github.com/googleapis/java-bigquerystorage/commit/517189858263107ffc00cee5328ac958cb45a3f9))
+
+
+### Documentation
+
+* clarify size limitations for AppendRowsRequest ([#1714](https://github.com/googleapis/java-bigquerystorage/issues/1714)) ([ed3fe1f](https://github.com/googleapis/java-bigquerystorage/commit/ed3fe1fb20fc2e07c26da42b7564599642540317))
+
+## [2.17.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.16.1...v2.17.0) (2022-07-25)
+
+
+### Features
+
+* Return explicit StreamWriterClosedException ([#1709](https://github.com/googleapis/java-bigquerystorage/issues/1709)) ([57eb6d0](https://github.com/googleapis/java-bigquerystorage/commit/57eb6d0078a498e2f792d76b685c113a6a52ea8e))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.14.0 ([#1706](https://github.com/googleapis/java-bigquerystorage/issues/1706)) ([e92a8c0](https://github.com/googleapis/java-bigquerystorage/commit/e92a8c066130a6f28500fa887aef96da8a7af6df))
+
+## [2.16.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.16.0...v2.16.1) (2022-07-19)
+
+
+### Bug Fixes
+
+* enable longpaths support for windows test ([#1485](https://github.com/googleapis/java-bigquerystorage/issues/1485)) ([#1699](https://github.com/googleapis/java-bigquerystorage/issues/1699)) ([0cb05fd](https://github.com/googleapis/java-bigquerystorage/commit/0cb05fd3f9a732a0e7fc0cc494e1a305ce44805f))
+
+## [2.16.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.15.0...v2.16.0) (2022-07-11)
+
+
+### Features
+
+* expose row level serialization failures for JsonStreamWriter append ([#1686](https://github.com/googleapis/java-bigquerystorage/issues/1686)) ([bba0746](https://github.com/googleapis/java-bigquerystorage/commit/bba0746a13c785621c4e4cbd2239060d67ce155b))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.8 ([#1690](https://github.com/googleapis/java-bigquerystorage/issues/1690)) ([195670d](https://github.com/googleapis/java-bigquerystorage/commit/195670d677864ed310898fe9f4b00cc28d942237))
+
+## [2.15.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.14.2...v2.15.0) (2022-07-01)
+
+
+### Features
+
+* add fields to eventually contain row level errors ([ec3ea29](https://github.com/googleapis/java-bigquerystorage/commit/ec3ea29efae1cf6567055d43219690b3d2db8b5e))
+
+
+### Bug Fixes
+
+* Modify client lib retry policy for CreateWriteStream with longer backoff, more error code and longer overall time ([#1679](https://github.com/googleapis/java-bigquerystorage/issues/1679)) ([ec3ea29](https://github.com/googleapis/java-bigquerystorage/commit/ec3ea29efae1cf6567055d43219690b3d2db8b5e))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.3 ([#1678](https://github.com/googleapis/java-bigquerystorage/issues/1678)) ([740f2ae](https://github.com/googleapis/java-bigquerystorage/commit/740f2ae070454fbc2a87fdd7c01c7f90fc3867f3))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.4 ([#1680](https://github.com/googleapis/java-bigquerystorage/issues/1680)) ([de1c8df](https://github.com/googleapis/java-bigquerystorage/commit/de1c8df710b0510d66e91ac42d5d56eba4442bdb))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.6 ([#1684](https://github.com/googleapis/java-bigquerystorage/issues/1684)) ([05cdb38](https://github.com/googleapis/java-bigquerystorage/commit/05cdb380d27ab7484c7f81a0a490a58de67694c6))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.7 ([#1687](https://github.com/googleapis/java-bigquerystorage/issues/1687)) ([8795ae7](https://github.com/googleapis/java-bigquerystorage/commit/8795ae78e55bb4eb651a6e78a2645e3fe8df8d8e))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.13.0 ([#1683](https://github.com/googleapis/java-bigquerystorage/issues/1683)) ([2821ee9](https://github.com/googleapis/java-bigquerystorage/commit/2821ee9d6893ce1f2ac68615d99973e2c98e3678))
+
+## [2.14.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.14.1...v2.14.2) (2022-06-08)
+
+
+### Bug Fixes
+
+* **floating:** floating point numbers as numerics ([#1648](https://github.com/googleapis/java-bigquerystorage/issues/1648)) ([ecf8598](https://github.com/googleapis/java-bigquerystorage/commit/ecf8598bf66c0921524bbdb74d968edb389f197c))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.1 ([#1668](https://github.com/googleapis/java-bigquerystorage/issues/1668)) ([3eb8d4f](https://github.com/googleapis/java-bigquerystorage/commit/3eb8d4f0ab4db257899f7c72e02fef0a8b200c6e))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.2 ([#1671](https://github.com/googleapis/java-bigquerystorage/issues/1671)) ([2018ede](https://github.com/googleapis/java-bigquerystorage/commit/2018ede764ebf02e658f685922dd16438c70f69b))
+
+## [2.14.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.14.0...v2.14.1) (2022-06-01)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.12.0 ([#1660](https://github.com/googleapis/java-bigquerystorage/issues/1660)) ([9d97a98](https://github.com/googleapis/java-bigquerystorage/commit/9d97a9848e87a32c1c4201243feb717d91a060ec))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.13.0 ([#1665](https://github.com/googleapis/java-bigquerystorage/issues/1665)) ([9c3a609](https://github.com/googleapis/java-bigquerystorage/commit/9c3a6099a2b622a08a4f94dd21846fa46c0fdde8))
+
+## [2.14.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.13.0...v2.14.0) (2022-05-19)
+
+
+### Features
+
+* add build scripts for native image testing in Java 17 ([#1440](https://github.com/googleapis/java-bigquerystorage/issues/1440)) ([#1655](https://github.com/googleapis/java-bigquerystorage/issues/1655)) ([ac2dfaf](https://github.com/googleapis/java-bigquerystorage/commit/ac2dfafa921fe489861e9767c360be413e5a2ec1))
+
+
+### Bug Fixes
+
+* Add a throwException behavior when the StreamWriter inflight queue is full ([#1642](https://github.com/googleapis/java-bigquerystorage/issues/1642)) ([4dcf0d5](https://github.com/googleapis/java-bigquerystorage/commit/4dcf0d5e161cd2530a5468c1e3b327db63c45185))
+* add extra JsonWriterTest to show that the LimitBehavior addition is not breaking ([#1643](https://github.com/googleapis/java-bigquerystorage/issues/1643)) ([320f5fc](https://github.com/googleapis/java-bigquerystorage/commit/320f5fc6a2a180e361f1a5a375095a65ec62003f))
+* ints/longs are numerics ([#1596](https://github.com/googleapis/java-bigquerystorage/issues/1596)) ([d046c8d](https://github.com/googleapis/java-bigquerystorage/commit/d046c8d5ff9943cd4731ac6fccc77956554820e1)), closes [#1516](https://github.com/googleapis/java-bigquerystorage/issues/1516)
+
+
+### Dependencies
+
+* update arrow.version to v8 ([#1645](https://github.com/googleapis/java-bigquerystorage/issues/1645)) ([06e3c34](https://github.com/googleapis/java-bigquerystorage/commit/06e3c3407cd94db1c9ded667d1f25153dc281ba5))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.12.0 ([#1654](https://github.com/googleapis/java-bigquerystorage/issues/1654)) ([ec4f60b](https://github.com/googleapis/java-bigquerystorage/commit/ec4f60bd5c00846f1061f2cc1a79518d6afebdc5))
+
+## [2.13.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.12.2...v2.13.0) (2022-05-05)
+
+
+### Features
+
+* add support to a few more specific StorageErrors for the Write API ([#1563](https://github.com/googleapis/java-bigquerystorage/issues/1563)) ([c26091e](https://github.com/googleapis/java-bigquerystorage/commit/c26091e48b6542cf2078d46e2dbfe3220ab031f6))
+* next release from main branch is 2.12.2 ([#1624](https://github.com/googleapis/java-bigquerystorage/issues/1624)) ([b2aa2a4](https://github.com/googleapis/java-bigquerystorage/commit/b2aa2a43752e5a8a71f7bc434397b73d77d8eb58))
+
+
+### Bug Fixes
+
+* A hang when the client fails to get DoneCallback ([#1637](https://github.com/googleapis/java-bigquerystorage/issues/1637)) ([3baa84e](https://github.com/googleapis/java-bigquerystorage/commit/3baa84e96671a14936d1667d0e036a1565fa5b7a))
+* Fix a possible NULL PTR after the introduced timeout on waitForDone ([#1638](https://github.com/googleapis/java-bigquerystorage/issues/1638)) ([e1c6ded](https://github.com/googleapis/java-bigquerystorage/commit/e1c6ded336effbe302eee56df056a56a9dbb6b2f))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.10 ([#1623](https://github.com/googleapis/java-bigquerystorage/issues/1623)) ([54b74b8](https://github.com/googleapis/java-bigquerystorage/commit/54b74b80368252b93fb445c481731e0edfe3f5c3))
+* update dependency org.apache.avro:avro to v1.11.0 ([#1632](https://github.com/googleapis/java-bigquerystorage/issues/1632)) ([b47eea0](https://github.com/googleapis/java-bigquerystorage/commit/b47eea05d4db5acaa7337dc4e1faa018d8b4e40d))
+
+
+### Documentation
+
+* **samples:** update WriteCommittedStream sample code to match best practices ([#1628](https://github.com/googleapis/java-bigquerystorage/issues/1628)) ([5d4c7e1](https://github.com/googleapis/java-bigquerystorage/commit/5d4c7e18b82ab85f7498e34a29920e9af765f918))
+* **sample:** update WriteToDefaultStream sample to match best practices ([#1631](https://github.com/googleapis/java-bigquerystorage/issues/1631)) ([73ddd7b](https://github.com/googleapis/java-bigquerystorage/commit/73ddd7b4fd44dce4be434726df57ecd84e6e3e6a))
+
+### [2.12.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.12.1...v2.12.2) (2022-04-18)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.7 ([#1614](https://github.com/googleapis/java-bigquerystorage/issues/1614)) ([ccdac87](https://github.com/googleapis/java-bigquerystorage/commit/ccdac87cc439b5f765d35af6d247b83122c1f40b))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.8 ([#1618](https://github.com/googleapis/java-bigquerystorage/issues/1618)) ([b9c50f1](https://github.com/googleapis/java-bigquerystorage/commit/b9c50f169d6092647becef5d99174be61b095ca8))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.9 ([#1620](https://github.com/googleapis/java-bigquerystorage/issues/1620)) ([7e1ae93](https://github.com/googleapis/java-bigquerystorage/commit/7e1ae93a1ee54a83d93a7421b438a53455dc89c2))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.10.0 ([#1619](https://github.com/googleapis/java-bigquerystorage/issues/1619)) ([55c4134](https://github.com/googleapis/java-bigquerystorage/commit/55c413459d029bf521eadf59f3c9fba9a7e876a1))
+
+### [2.12.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.12.0...v2.12.1) (2022-04-08)
+
+
+### Bug Fixes
+
+* even better ISO compliance ([#1607](https://github.com/googleapis/java-bigquerystorage/issues/1607)) ([5701597](https://github.com/googleapis/java-bigquerystorage/commit/570159799d8d464fbdb5bd617cc2a51f4f276f98))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.6 ([#1610](https://github.com/googleapis/java-bigquerystorage/issues/1610)) ([20e4225](https://github.com/googleapis/java-bigquerystorage/commit/20e4225734358bfce7c872e19c823ac6958d7905))
+
+## [2.12.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.11.1...v2.12.0) (2022-04-01)
+
+
+### Features
+
+* Deprecate format specific `row_count` field in Read API ([#1599](https://github.com/googleapis/java-bigquerystorage/issues/1599)) ([6f415f6](https://github.com/googleapis/java-bigquerystorage/commit/6f415f62685549f50f7382bc7d896e5f60c5285e))
+
+
+### Bug Fixes
+
+* better ISO8601 compliance ([#1589](https://github.com/googleapis/java-bigquerystorage/issues/1589)) ([29fa8b7](https://github.com/googleapis/java-bigquerystorage/commit/29fa8b73bc092a7ebe8e3951daf2026057d1d040))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.5 ([#1602](https://github.com/googleapis/java-bigquerystorage/issues/1602)) ([8787b5d](https://github.com/googleapis/java-bigquerystorage/commit/8787b5d36849981f9497ac2b0ddf8c5291b07fc8))
+
+### [2.11.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.11.0...v2.11.1) (2022-03-29)
+
+
+### Bug Fixes
+
+* Numerics cast from strings ([#1588](https://github.com/googleapis/java-bigquerystorage/issues/1588)) ([085768b](https://github.com/googleapis/java-bigquerystorage/commit/085768b41943237f50d5e47fa4ba5f22abff9fc6))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.0 ([#1578](https://github.com/googleapis/java-bigquerystorage/issues/1578)) ([adca54e](https://github.com/googleapis/java-bigquerystorage/commit/adca54eb0348d2ba02d3c272e180beebd9feb6c6))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.1 ([#1583](https://github.com/googleapis/java-bigquerystorage/issues/1583)) ([cc33d0e](https://github.com/googleapis/java-bigquerystorage/commit/cc33d0e539cc2ed53bf0ab265c5ebe55a0ac6c6e))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.2 ([#1587](https://github.com/googleapis/java-bigquerystorage/issues/1587)) ([d4ab7f6](https://github.com/googleapis/java-bigquerystorage/commit/d4ab7f64e32e4be8d1047d93b34cdadeccd98d90))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.3 ([#1591](https://github.com/googleapis/java-bigquerystorage/issues/1591)) ([0c62c85](https://github.com/googleapis/java-bigquerystorage/commit/0c62c85cf5fead39c04512c49db79d8e95fd44e6))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.10.4 ([#1594](https://github.com/googleapis/java-bigquerystorage/issues/1594)) ([05e9062](https://github.com/googleapis/java-bigquerystorage/commit/05e90620192747096854e60047cbca1fa6d81ea5))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.9.0 ([#1593](https://github.com/googleapis/java-bigquerystorage/issues/1593)) ([5234809](https://github.com/googleapis/java-bigquerystorage/commit/5234809e0482b05e40cbb6cecbb1154699255a59))
+
+## [2.11.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.10.1...v2.11.0) (2022-03-14)
+
+
+### Features
+
+* add JsonToProtoMessage support for TIMESTAMP ([#1574](https://github.com/googleapis/java-bigquerystorage/issues/1574)) ([6412fb2](https://github.com/googleapis/java-bigquerystorage/commit/6412fb2c03fc6b2c6d220d72bbd89e47922bf970)), closes [#1515](https://github.com/googleapis/java-bigquerystorage/issues/1515)
+
+
+### Bug Fixes
+
+* a possible race condition where we used the table schema outside of the lock. ([#1575](https://github.com/googleapis/java-bigquerystorage/issues/1575)) ([b587638](https://github.com/googleapis/java-bigquerystorage/commit/b58763833ad4bd51515c055bbcb8e29d9fea05a9))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.9.1 ([#1562](https://github.com/googleapis/java-bigquerystorage/issues/1562)) ([7efdbf0](https://github.com/googleapis/java-bigquerystorage/commit/7efdbf0df14ccc84409d09b5c93ac1058c95d50e))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.9.3 ([#1570](https://github.com/googleapis/java-bigquerystorage/issues/1570)) ([3d281d2](https://github.com/googleapis/java-bigquerystorage/commit/3d281d2e01f9170eac92ac51757fbd434d9b6b73))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.9.4 ([#1572](https://github.com/googleapis/java-bigquerystorage/issues/1572)) ([d3935cd](https://github.com/googleapis/java-bigquerystorage/commit/d3935cd3120f49aaae4c4e6839782bfa05836e90))
+
+### [2.10.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.10.0...v2.10.1) (2022-03-03)
+
+
+### Bug Fixes
+
+* change customer StorageException from RuntimeException to StatusRuntimeException ([#1559](https://github.com/googleapis/java-bigquerystorage/issues/1559)) ([523377e](https://github.com/googleapis/java-bigquerystorage/commit/523377e0bbfbae33165258b8eeed812c81cfeb82))
+
+
+### Dependencies
+
+* update actions/checkout action to v3 ([#1554](https://github.com/googleapis/java-bigquerystorage/issues/1554)) ([1371ac9](https://github.com/googleapis/java-bigquerystorage/commit/1371ac91bad55711b66e08ddeed6302598329a46))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.8.0 ([#1555](https://github.com/googleapis/java-bigquerystorage/issues/1555)) ([cc25162](https://github.com/googleapis/java-bigquerystorage/commit/cc25162021a2b05e68fd3dd1f98e252a10626f6b))
+
+
+### Documentation
+
+* improve documentation for write client ([#1560](https://github.com/googleapis/java-bigquerystorage/issues/1560)) ([fbb5321](https://github.com/googleapis/java-bigquerystorage/commit/fbb5321bd35bdd7ba81bcb95278d6cdd9abd3dee))
+
+## [2.10.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.9.1...v2.10.0) (2022-02-28)
+
+
+### Features
+
+* add trace_id for Read API ([#1544](https://github.com/googleapis/java-bigquerystorage/issues/1544)) ([5a92ade](https://github.com/googleapis/java-bigquerystorage/commit/5a92ade809d16e2770c07bd5287fbccb75645b21))
+
+
+### Bug Fixes
+
+* Add an e2e test on byte string array and remove an impossible case for byte array conversion ([#1546](https://github.com/googleapis/java-bigquerystorage/issues/1546)) ([adcb9bb](https://github.com/googleapis/java-bigquerystorage/commit/adcb9bb699e54d93da320e6a458caff79faa2f9f))
+* add more idiomatic way to insert ARRAY data ([#1550](https://github.com/googleapis/java-bigquerystorage/issues/1550)) ([3ae4038](https://github.com/googleapis/java-bigquerystorage/commit/3ae40381aeacd21676f2c896216faa26eb1ef960))
+* remove bigquery.readonly auth scope ([#1543](https://github.com/googleapis/java-bigquerystorage/issues/1543)) ([a70ae45](https://github.com/googleapis/java-bigquerystorage/commit/a70ae45c013a311aaeccd6885a9b9752bbcf57cc))
+
+
+### Dependencies
+
+* update actions/github-script action to v6 ([#1533](https://github.com/googleapis/java-bigquerystorage/issues/1533)) ([370c3b5](https://github.com/googleapis/java-bigquerystorage/commit/370c3b5bf236ab7853c4cc6d32393f93f5a165e4))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.9.0 ([#1538](https://github.com/googleapis/java-bigquerystorage/issues/1538)) ([43446b2](https://github.com/googleapis/java-bigquerystorage/commit/43446b24438526cc31542a0181fbed610df4b3c0))
+
+
+### Documentation
+
+* **tutorials:** Add STRUCT column to tutorial ([#1534](https://github.com/googleapis/java-bigquerystorage/issues/1534)) ([9595dab](https://github.com/googleapis/java-bigquerystorage/commit/9595dab89144cb13e812f4a277bb424d98c6591c))
+* **tutorials:** Call append asynchronously instead of blocking ([#1542](https://github.com/googleapis/java-bigquerystorage/issues/1542)) ([c777e23](https://github.com/googleapis/java-bigquerystorage/commit/c777e236c21204300231c3d3cbfaa3760da643b8))
+
+### [2.9.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.9.0...v2.9.1) (2022-02-11)
+
+
+### Bug Fixes
+
+* rollback the reconnect after 10MB behavior after the fix for omg… ([#1530](https://github.com/googleapis/java-bigquerystorage/issues/1530)) ([737bd0e](https://github.com/googleapis/java-bigquerystorage/commit/737bd0e2c21332fd1c0c82a12e31ce5749a078fd))
+
+
+### Dependencies
+
+* update arrow.version to v7 (major) ([#1526](https://github.com/googleapis/java-bigquerystorage/issues/1526)) ([b6a4479](https://github.com/googleapis/java-bigquerystorage/commit/b6a4479e023c6c05bccd3451e929105a1f7b8b9d))
+* update dependency org.apache.arrow:arrow-memory-netty to v7 ([#1527](https://github.com/googleapis/java-bigquerystorage/issues/1527)) ([06e927f](https://github.com/googleapis/java-bigquerystorage/commit/06e927f071271c12d7c1904b2d2c3f7f9e8338c7))
+* update dependency org.apache.arrow:arrow-vector to v7 ([#1528](https://github.com/googleapis/java-bigquerystorage/issues/1528)) ([70297cc](https://github.com/googleapis/java-bigquerystorage/commit/70297ccd4828fc08bdf3b021af103aa01975ccb3))
+
+## [2.9.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.8.4...v2.9.0) (2022-02-08)
+
+
+### Features
+
+* add an indicator of how much time a request is waiting for the inflight limit ([#1514](https://github.com/googleapis/java-bigquerystorage/issues/1514)) ([54874be](https://github.com/googleapis/java-bigquerystorage/commit/54874be945c2b88be6be03ae654277445c17741d))
+* temp workaround for omg/48020 ([#1521](https://github.com/googleapis/java-bigquerystorage/issues/1521)) ([ff28f65](https://github.com/googleapis/java-bigquerystorage/commit/ff28f65328fbd433117a314cdff7510a28160591))
+
+
+### Bug Fixes
+
+* add a DATE type e2e test ([#1511](https://github.com/googleapis/java-bigquerystorage/issues/1511)) ([5eaf38e](https://github.com/googleapis/java-bigquerystorage/commit/5eaf38ee7fb9f84c454e0e9da5fe9092ff4c456a))
+
+
+### Documentation
+
+* **tutorials:** Add IT to JsonWriteDefaultStream tutorial ([#1522](https://github.com/googleapis/java-bigquerystorage/issues/1522)) ([9fd7aca](https://github.com/googleapis/java-bigquerystorage/commit/9fd7aca8b6032a2c02d0ea91dd063c9ac9e151aa))
+
+### [2.8.4](https://github.com/googleapis/java-bigquerystorage/compare/v2.8.3...v2.8.4) (2022-02-03)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.7.1 ([#1505](https://github.com/googleapis/java-bigquerystorage/issues/1505)) ([a700d92](https://github.com/googleapis/java-bigquerystorage/commit/a700d92c9f591f2b653aad6d79b2f61c4c44df98))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.8.0 ([#1508](https://github.com/googleapis/java-bigquerystorage/issues/1508)) ([b9ab6aa](https://github.com/googleapis/java-bigquerystorage/commit/b9ab6aac2f360f9ab541d83ad1f2d921657a56bf))
+
+
+### Documentation
+
+* **tutorials:** add integration test file structure for tutorials ([#1507](https://github.com/googleapis/java-bigquerystorage/issues/1507)) ([ecb1fbb](https://github.com/googleapis/java-bigquerystorage/commit/ecb1fbb1186a2bb22a2c27ba85ff6ecfab883011))
+* **tutorials:** Add README, other small fixes to JsonWriterDefaultStream tutorial ([#1504](https://github.com/googleapis/java-bigquerystorage/issues/1504)) ([691eae5](https://github.com/googleapis/java-bigquerystorage/commit/691eae56f80a6751f25ef0db4f1d7b4bd6db2f66))
+
+### [2.8.3](https://github.com/googleapis/java-bigquerystorage/compare/v2.8.2...v2.8.3) (2022-01-28)
+
+
+### Dependencies
+
+* **java:** update actions/github-script action to v5 ([#1339](https://github.com/googleapis/java-bigquerystorage/issues/1339)) ([#1491](https://github.com/googleapis/java-bigquerystorage/issues/1491)) ([7eea012](https://github.com/googleapis/java-bigquerystorage/commit/7eea012ee49ebba9c97273abd6331a63868bf72e))
+* update actions/github-script action to v5 ([#1492](https://github.com/googleapis/java-bigquerystorage/issues/1492)) ([580c033](https://github.com/googleapis/java-bigquerystorage/commit/580c03363e5f3b8231e8ef491164badf0b126ca8))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.7.0 ([#1499](https://github.com/googleapis/java-bigquerystorage/issues/1499)) ([0d414f8](https://github.com/googleapis/java-bigquerystorage/commit/0d414f8eaca6daf89b3f981f2c727b217a59e658))
+* upgrade google-cloud-shared-dependencies to v2.7.0 ([#1501](https://github.com/googleapis/java-bigquerystorage/issues/1501)) ([3f75e83](https://github.com/googleapis/java-bigquerystorage/commit/3f75e83cb5140f633204374a315831ccd09dfae3))
+
+
+### Documentation
+
+* **tutorials:** add a README.md ([#1500](https://github.com/googleapis/java-bigquerystorage/issues/1500)) ([c50c6a0](https://github.com/googleapis/java-bigquerystorage/commit/c50c6a039a47cc6e0a1e56966142b21e476074c8))
+* **tutorials:** add new tutorials directory and JsonWriterDefaultStream tutorial ([#1498](https://github.com/googleapis/java-bigquerystorage/issues/1498)) ([fd1ecf2](https://github.com/googleapis/java-bigquerystorage/commit/fd1ecf2a8e7b96b7edbf5c9b6d12dedee8c9ba49))
+
+### [2.8.2](https://github.com/googleapis/java-bigquerystorage/compare/v2.8.1...v2.8.2) (2022-01-25)
+
+
+### Bug Fixes
+
+* **java:** add reflection configuration for native-image testing ([#1488](https://github.com/googleapis/java-bigquerystorage/issues/1488)) ([d87fe28](https://github.com/googleapis/java-bigquerystorage/commit/d87fe282800b4ef4a9c5ecf918278a84eb20ce5d))
+
+
+### Documentation
+
+* **samples:** add finalize call to our samples ([#1471](https://github.com/googleapis/java-bigquerystorage/issues/1471)) ([47e3654](https://github.com/googleapis/java-bigquerystorage/commit/47e36543480bf495f4caea63419bb3b31ac7638c))
+* **samples:** update to v1 in doc URLs ([#1489](https://github.com/googleapis/java-bigquerystorage/issues/1489)) ([7efe427](https://github.com/googleapis/java-bigquerystorage/commit/7efe4273d9afebac6f67ed46763d5896b4b1bbcd))
+
+
+### Dependencies
+
+* exclude unused runtime dependencies brought in by gax-grpc ([#1490](https://github.com/googleapis/java-bigquerystorage/issues/1490)) ([3e7833e](https://github.com/googleapis/java-bigquerystorage/commit/3e7833ea33ceac5f3e7cb9696a6e7ef6e914469b))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.6.1 ([#1473](https://github.com/googleapis/java-bigquerystorage/issues/1473)) ([f996cdb](https://github.com/googleapis/java-bigquerystorage/commit/f996cdbbc6cc9ad7c6a649ebb649ae005054f36e))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.6.2 ([#1475](https://github.com/googleapis/java-bigquerystorage/issues/1475)) ([37c1fd8](https://github.com/googleapis/java-bigquerystorage/commit/37c1fd887acfc5676ebb55e085b6e3514b6e5ca4))
+* update dependency kr.motd.maven:os-maven-plugin to v1.7.0 ([#1476](https://github.com/googleapis/java-bigquerystorage/issues/1476)) ([e038151](https://github.com/googleapis/java-bigquerystorage/commit/e038151442c4fd86e94ab7f3d02989c73a23ee91))
+
+### [2.8.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.8.0...v2.8.1) (2022-01-07)
+
+
+### Bug Fixes
+
+* **java:** run Maven in plain console-friendly mode ([#1301](https://www.github.com/googleapis/java-bigquerystorage/issues/1301)) ([#1461](https://www.github.com/googleapis/java-bigquerystorage/issues/1461)) ([6b9e08c](https://www.github.com/googleapis/java-bigquerystorage/commit/6b9e08c4736cfdb4d98bf3c8f782a87ab68580ce))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.6.0 ([#1459](https://www.github.com/googleapis/java-bigquerystorage/issues/1459)) ([323813c](https://www.github.com/googleapis/java-bigquerystorage/commit/323813c88cd673bbf3f8631f03ac1732f7d3ac53))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.6.0 ([#1467](https://www.github.com/googleapis/java-bigquerystorage/issues/1467)) ([8ebb448](https://www.github.com/googleapis/java-bigquerystorage/commit/8ebb448ff0a1188124c1deeebe4bddbfe277e35b))
+
+## [2.8.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.7.0...v2.8.0) (2021-12-30)
+
+
+### Features
+
+* Add ignoreUnknownField support in JsonWriter ([#1455](https://www.github.com/googleapis/java-bigquerystorage/issues/1455)) ([4616adb](https://www.github.com/googleapis/java-bigquerystorage/commit/4616adb30d07d7be522b74837adae080eca103a6))
+* Add reconnect support to v1 client lib. ([#1446](https://www.github.com/googleapis/java-bigquerystorage/issues/1446)) ([a5157fa](https://www.github.com/googleapis/java-bigquerystorage/commit/a5157faf37354ee48a0af930a68edf4823b5be6f))
+* add schema update support in JsonStreamWriter ([#1447](https://www.github.com/googleapis/java-bigquerystorage/issues/1447)) ([973afcc](https://www.github.com/googleapis/java-bigquerystorage/commit/973afccc22507b8109b3920d3fc183692dcc66c2))
+* support `append()` without offset in `StreamWriter` ([#1452](https://www.github.com/googleapis/java-bigquerystorage/issues/1452)) ([cb8b0ad](https://www.github.com/googleapis/java-bigquerystorage/commit/cb8b0ad25fbf07c3509b3d94244a999b9cb90e54))
+
+
+### Bug Fixes
+
+* allow all fields to be null values ([#1450](https://www.github.com/googleapis/java-bigquerystorage/issues/1450)) ([e47ac79](https://www.github.com/googleapis/java-bigquerystorage/commit/e47ac791765486f02f027d3c440645ef2a379ba9))
+* finish BIGNUMERIC support ([#1449](https://www.github.com/googleapis/java-bigquerystorage/issues/1449)) ([d9d51cd](https://www.github.com/googleapis/java-bigquerystorage/commit/d9d51cd203733a972c80e312e9e279b50fb260fa))
+* fix a NullPtr when user closes a writer without connection being ever established ([#1454](https://www.github.com/googleapis/java-bigquerystorage/issues/1454)) ([b774f5d](https://www.github.com/googleapis/java-bigquerystorage/commit/b774f5d0d00275278708d1299b4912b455be4bed))
+* update storageError support due to server side enhancement ([#1456](https://www.github.com/googleapis/java-bigquerystorage/issues/1456)) ([6243ad5](https://www.github.com/googleapis/java-bigquerystorage/commit/6243ad5cba61d4dae7f4ceb60b09c625e7589215))
+
+## [2.7.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.6.5...v2.7.0) (2021-12-07)
+
+
+### Features
+
+* add write_mode support for BigQuery Storage Write API v1 ([#1441](https://www.github.com/googleapis/java-bigquerystorage/issues/1441)) ([ffe0670](https://www.github.com/googleapis/java-bigquerystorage/commit/ffe067011a79eb96e36f12cb738c34e2763e7c59))
+
+
+### Bug Fixes
+
+* **java:** add -ntp flag to native image testing command ([#1299](https://www.github.com/googleapis/java-bigquerystorage/issues/1299)) ([#1439](https://www.github.com/googleapis/java-bigquerystorage/issues/1439)) ([b6a880c](https://www.github.com/googleapis/java-bigquerystorage/commit/b6a880c90acd195d5f4294de343d81cffe8c359a))
+
+### [2.6.5](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.6.4...v2.6.5) (2021-12-05)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.5.1 ([#1435](https://www.github.com/googleapis/java-bigquerystorage/issues/1435)) ([9116ecf](https://www.github.com/googleapis/java-bigquerystorage/commit/9116ecfc7257265ed575888c4cdc27d8fa4e7692))
+
+### [2.6.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.6.3...v2.6.4) (2021-12-03)
+
+
+### Reverts
+
+* "chore(deps): update dependency com.google.cloud.samples:shared-configuration to v1.0.24" ([#1431](https://www.github.com/googleapis/java-bigquerystorage/issues/1431)) ([b4fab5f](https://www.github.com/googleapis/java-bigquerystorage/commit/b4fab5f00b8d6a58b9768d32f16fb3a013265690))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.5.0 ([#1427](https://www.github.com/googleapis/java-bigquerystorage/issues/1427)) ([bc69fee](https://www.github.com/googleapis/java-bigquerystorage/commit/bc69fee6cfa1ef152ed1a6da61fb4728f1639aa8))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.1 ([#1429](https://www.github.com/googleapis/java-bigquerystorage/issues/1429)) ([543c63e](https://www.github.com/googleapis/java-bigquerystorage/commit/543c63e6c00da9a177994b4292ae5cda2ceeab65))
+
+### [2.6.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.6.2...v2.6.3) (2021-11-19)
+
+
+### Dependencies
+
+* update arrow.version to v6.0.1 ([#1417](https://www.github.com/googleapis/java-bigquerystorage/issues/1417)) ([e545cd2](https://www.github.com/googleapis/java-bigquerystorage/commit/e545cd242b5fb12a469a1e3cf389403d889e49cc))
+
+### [2.6.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.6.1...v2.6.2) (2021-11-17)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.4.1 ([#1412](https://www.github.com/googleapis/java-bigquerystorage/issues/1412)) ([e862027](https://www.github.com/googleapis/java-bigquerystorage/commit/e862027ad85c337b352a99fcd669cceb14ceed8c))
+
+### [2.6.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.6.0...v2.6.1) (2021-11-17)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.4.0 ([#1408](https://www.github.com/googleapis/java-bigquerystorage/issues/1408)) ([0ad6ce7](https://www.github.com/googleapis/java-bigquerystorage/commit/0ad6ce7311b9402fd0b4539833f3cf42e485873c))
+
+## [2.6.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.5.1...v2.6.0) (2021-11-15)
+
+
+### Features
+
+* add support for StorageError ([#1391](https://www.github.com/googleapis/java-bigquerystorage/issues/1391)) ([176dc8b](https://www.github.com/googleapis/java-bigquerystorage/commit/176dc8b1fa30cf9a9a98628bd83c9ab487ddb094))
+* DATE and DOUBLE support more input formats ([#1397](https://www.github.com/googleapis/java-bigquerystorage/issues/1397)) ([9c8dc0a](https://www.github.com/googleapis/java-bigquerystorage/commit/9c8dc0aabb471fdb8580f434cc5a66bad585e8f2))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.0 ([#1406](https://www.github.com/googleapis/java-bigquerystorage/issues/1406)) ([38f5eb9](https://www.github.com/googleapis/java-bigquerystorage/commit/38f5eb9788e1f090c0034a789a512dcec2384fdb))
+
+### [2.5.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.5.0...v2.5.1) (2021-11-03)
+
+
+### Dependencies
+
+* update dependency org.apache.avro:avro to v1.11.0 ([#1387](https://www.github.com/googleapis/java-bigquerystorage/issues/1387)) ([117ad11](https://www.github.com/googleapis/java-bigquerystorage/commit/117ad11c91850c67c6b99bdecf729e48728bfbbe))
+
+## [2.5.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.4.2...v2.5.0) (2021-10-26)
+
+
+### Features
+
+* next release from main branch is 2.4.1 ([#1376](https://www.github.com/googleapis/java-bigquerystorage/issues/1376)) ([ca21447](https://www.github.com/googleapis/java-bigquerystorage/commit/ca21447e56dacbe2cec4722f037d441479ec2a8f))
+
+
+### Bug Fixes
+
+* BQ/proto schema names should be compared lowercase ([#1369](https://www.github.com/googleapis/java-bigquerystorage/issues/1369)) ([a0da90e](https://www.github.com/googleapis/java-bigquerystorage/commit/a0da90e8de89c5b9ef77183fee86e1e1a7389e00))
+
+
+### Dependencies
+
+* update arrow.version to v6 ([#1383](https://www.github.com/googleapis/java-bigquerystorage/issues/1383)) ([f2e3562](https://www.github.com/googleapis/java-bigquerystorage/commit/f2e3562fc1ed0a1a1e71c077dc6d37ae3fe6b3be))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.3.2 ([#1378](https://www.github.com/googleapis/java-bigquerystorage/issues/1378)) ([5983419](https://www.github.com/googleapis/java-bigquerystorage/commit/5983419a0d71849787876ecb5715c919f6ce04f9))
+* update dependency com.google.cloud:google-cloud-bigquery to v2.3.3 ([#1382](https://www.github.com/googleapis/java-bigquerystorage/issues/1382)) ([8705af7](https://www.github.com/googleapis/java-bigquerystorage/commit/8705af7a11aa6a8f8182193011fc8bc7c4a28a4f))
+
+### [2.4.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.4.1...v2.4.2) (2021-10-20)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.3.1 ([#1370](https://www.github.com/googleapis/java-bigquerystorage/issues/1370)) ([ba1a1ad](https://www.github.com/googleapis/java-bigquerystorage/commit/ba1a1ad1d9ce6d87ec8bdcfd3412792180c58742))
+
+### [2.4.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.4.0...v2.4.1) (2021-10-19)
+
+
+### Bug Fixes
+
+* remove excessive StreamWriter info logging ([#1359](https://www.github.com/googleapis/java-bigquerystorage/issues/1359)) ([0d27d5a](https://www.github.com/googleapis/java-bigquerystorage/commit/0d27d5a10ad62d506a646dcbf0a196e1d57740b0))
+
+
+### Documentation
+
+* **samples:** Update samples to v1 ([#1364](https://www.github.com/googleapis/java-bigquerystorage/issues/1364)) ([b705783](https://www.github.com/googleapis/java-bigquerystorage/commit/b705783d5939c1df96dc565cb763365e1989adb6))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v2.3.0 ([#1361](https://www.github.com/googleapis/java-bigquerystorage/issues/1361)) ([c8f0051](https://www.github.com/googleapis/java-bigquerystorage/commit/c8f00517a385461a2816afe7b95f8d5347e2fc10))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.4.0 ([#1365](https://www.github.com/googleapis/java-bigquerystorage/issues/1365)) ([69e96aa](https://www.github.com/googleapis/java-bigquerystorage/commit/69e96aaf8f13bd4887ec34af8db2bb5bf740282f))
+
+## [2.4.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.3.3...v2.4.0) (2021-10-12)
+
+
+### Features
+
+* Add INTERVAL and JSON type support to JsonStreamWriter ([#1351](https://www.github.com/googleapis/java-bigquerystorage/issues/1351)) ([f2121a7](https://www.github.com/googleapis/java-bigquerystorage/commit/f2121a796fa04d153124d87bd0d1041a401c0a42))
+
+### [2.3.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.3.2...v2.3.3) (2021-10-05)
+
+
+### Bug Fixes
+
+* add string to DATETIME, TIME, NUMERIC, BIGNUMERIC support in JsonStreamWriter v1 ([#1345](https://www.github.com/googleapis/java-bigquerystorage/issues/1345)) ([9d272dd](https://www.github.com/googleapis/java-bigquerystorage/commit/9d272dd711dfe8909a7b421dabdcdef30f1dc689))
+* JsonWriter accepts string input for DATETIME, TIME, NUMERIC, BIGNUMERIC field ([#1339](https://www.github.com/googleapis/java-bigquerystorage/issues/1339)) ([691f078](https://www.github.com/googleapis/java-bigquerystorage/commit/691f078f09e32fa7784d4afeeee0e8071f78d6fd))
+* Sample should show sending multiple rows in one request ([#1335](https://www.github.com/googleapis/java-bigquerystorage/issues/1335)) ([3f85a68](https://www.github.com/googleapis/java-bigquerystorage/commit/3f85a68d6812aac94ca8a266d76be2aa94cd0b32))
+
+
+### 
Documentation + +* **samples:** Add WriteAPI BUFFERED mode sample ([#1338](https://www.github.com/googleapis/java-bigquerystorage/issues/1338)) ([5dfd523](https://www.github.com/googleapis/java-bigquerystorage/commit/5dfd5231c1ffa68a1b92ba01a9608d8cee2c0596)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.2.0 ([#1346](https://www.github.com/googleapis/java-bigquerystorage/issues/1346)) ([9370eb8](https://www.github.com/googleapis/java-bigquerystorage/commit/9370eb8ea74547792f9597d20707546836c417b6)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.2.1 ([#1347](https://www.github.com/googleapis/java-bigquerystorage/issues/1347)) ([406f5d3](https://www.github.com/googleapis/java-bigquerystorage/commit/406f5d3d446d3a9639b2c95590b3c6c38118d741)) + +### [2.3.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.3.1...v2.3.2) (2021-10-01) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.13 ([#1336](https://www.github.com/googleapis/java-bigquerystorage/issues/1336)) ([8b2e46f](https://www.github.com/googleapis/java-bigquerystorage/commit/8b2e46f680d95b4e59734cc7793b8d9b69e718a8)) + +### [2.3.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.3.0...v2.3.1) (2021-09-27) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.12 ([#1327](https://www.github.com/googleapis/java-bigquerystorage/issues/1327)) ([67e694c](https://www.github.com/googleapis/java-bigquerystorage/commit/67e694c447bdea65baefb89dbe738239107f32d9)) + +## [2.3.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.2.1...v2.3.0) (2021-09-24) + + +### Features + +* writeapi v1 manual client lib ([#1323](https://www.github.com/googleapis/java-bigquerystorage/issues/1323)) ([baf8fb3](https://www.github.com/googleapis/java-bigquerystorage/commit/baf8fb3adc2e5135b71dd918ab30b619493a1b83)) + +### [2.2.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.2.0...v2.2.1) (2021-09-23) + + +### Bug Fixes + +* add missing read api retry setting on SplitReadStream ([#1315](https://www.github.com/googleapis/java-bigquerystorage/issues/1315)) ([015d6f4](https://www.github.com/googleapis/java-bigquerystorage/commit/015d6f4bbae4d7a4e8351e8173b25086372cfd8d)) + + +### Documentation + +* Align session length with public documentation feat: Expose estimated bytes that a session will scan. 
([#1310](https://www.github.com/googleapis/java-bigquerystorage/issues/1310)) ([fff5c89](https://www.github.com/googleapis/java-bigquerystorage/commit/fff5c89738187bc4a34ce649ccf9c725a76282f7)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.10 ([#1318](https://www.github.com/googleapis/java-bigquerystorage/issues/1318)) ([f5d8b2d](https://www.github.com/googleapis/java-bigquerystorage/commit/f5d8b2d3fc3036de3b3e1167cd59bd826e451e45)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.11 ([#1320](https://www.github.com/googleapis/java-bigquerystorage/issues/1320)) ([12a2b03](https://www.github.com/googleapis/java-bigquerystorage/commit/12a2b03f0146173249c5b7c4e906e995e2fa6212)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.8 ([#1311](https://www.github.com/googleapis/java-bigquerystorage/issues/1311)) ([e5aa8fd](https://www.github.com/googleapis/java-bigquerystorage/commit/e5aa8fdcb4e1ba111f5eb23b82161b88a2012e31)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.9 ([#1317](https://www.github.com/googleapis/java-bigquerystorage/issues/1317)) ([600531d](https://www.github.com/googleapis/java-bigquerystorage/commit/600531d39f1642dcc4fda44790edc5dbb54d04f7)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.1 ([#1308](https://www.github.com/googleapis/java-bigquerystorage/issues/1308)) ([7be798d](https://www.github.com/googleapis/java-bigquerystorage/commit/7be798d3a1aa00bd7a4329b7539bf72cb301c844)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.3.0 ([#1319](https://www.github.com/googleapis/java-bigquerystorage/issues/1319)) ([2f2a44c](https://www.github.com/googleapis/java-bigquerystorage/commit/2f2a44cf649e29dce52ac36ffda91777158caf9a)) + +## [2.2.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.1.2...v2.2.0) (2021-09-10) + + +### Features + +* add trace id support to JsonWriter and add default trace id to help identify json writer users. 
([#1302](https://www.github.com/googleapis/java-bigquerystorage/issues/1302)) ([0e749d9](https://www.github.com/googleapis/java-bigquerystorage/commit/0e749d9ee0201bc4653735ac34638452502a1c26)) + + +### Bug Fixes + +* Accept null json values in JsonToProtoMessage converter ([#1288](https://www.github.com/googleapis/java-bigquerystorage/issues/1288)) ([fb515ab](https://www.github.com/googleapis/java-bigquerystorage/commit/fb515ab0d8681bb7e559e3788f0ec8c0852b6e64)) +* beta cleanup: remove deprecated classes and unused functionalities ([#1300](https://www.github.com/googleapis/java-bigquerystorage/issues/1300)) ([7d08d7b](https://www.github.com/googleapis/java-bigquerystorage/commit/7d08d7b96bc13b6bc72e795b0b975a77d7712882)) +* Cannot share client between appendRows and other function becaus… ([#1290](https://www.github.com/googleapis/java-bigquerystorage/issues/1290)) ([c2dcf6f](https://www.github.com/googleapis/java-bigquerystorage/commit/c2dcf6fea0e6c5ed8085269fbec884deb5f32ad0)) +* remove dependency on google-cloud-bigquery (cyclic dep) ([#1295](https://www.github.com/googleapis/java-bigquerystorage/issues/1295)) ([7ac47de](https://www.github.com/googleapis/java-bigquerystorage/commit/7ac47de0ad5a37fcc7bbb6ea7ce0f5b7e59d276c)), closes [#1249](https://www.github.com/googleapis/java-bigquerystorage/issues/1249) +* the request limit should be 10MB instead of 8MB ([#1289](https://www.github.com/googleapis/java-bigquerystorage/issues/1289)) ([ed1ea00](https://www.github.com/googleapis/java-bigquerystorage/commit/ed1ea00e25da1501d300f3c80ce0a17cc1f3d883)) +* update comment to be consistent with code ([#1292](https://www.github.com/googleapis/java-bigquerystorage/issues/1292)) ([5669420](https://www.github.com/googleapis/java-bigquerystorage/commit/56694206740ded2edc115a88c0bf4bf4229b4fdf)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.7 ([#1298](https://www.github.com/googleapis/java-bigquerystorage/issues/1298)) ([c156410](https://www.github.com/googleapis/java-bigquerystorage/commit/c1564102147ceaf0bd77b9c86a9e5209dbc73174)) + + +### Documentation + +* Align session length with public documentation feat: Align ReadRows timeout with other versions of the API. 
([#1281](https://www.github.com/googleapis/java-bigquerystorage/issues/1281)) ([b920cd5](https://www.github.com/googleapis/java-bigquerystorage/commit/b920cd59756b9db60026035605447236d4a0adf0)) +* **samples:** update WriteToDefaultStream.java sample ([#1305](https://www.github.com/googleapis/java-bigquerystorage/issues/1305)) ([83c8e23](https://www.github.com/googleapis/java-bigquerystorage/commit/83c8e23b45ad233d82af89df1b61cc39b22ffe1c)) + +### [2.1.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.1.1...v2.1.2) (2021-09-02) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.5 ([#1279](https://www.github.com/googleapis/java-bigquerystorage/issues/1279)) ([59d031e](https://www.github.com/googleapis/java-bigquerystorage/commit/59d031ee1f4ab1f06bcc7b2d856bc11d29f13f53)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.6 ([#1284](https://www.github.com/googleapis/java-bigquerystorage/issues/1284)) ([8d62b9c](https://www.github.com/googleapis/java-bigquerystorage/commit/8d62b9c31db3077be62650873b81fe3a12110879)) + +### [2.1.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.1.0...v2.1.1) (2021-08-31) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-bigquery to v2-rev20210819-1.32.1 ([#1273](https://www.github.com/googleapis/java-bigquerystorage/issues/1273)) ([0ab4bbb](https://www.github.com/googleapis/java-bigquerystorage/commit/0ab4bbbb1203f0dda49be721df7e2cee3838ec0b)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.3 ([#1266](https://www.github.com/googleapis/java-bigquerystorage/issues/1266)) ([e72d50f](https://www.github.com/googleapis/java-bigquerystorage/commit/e72d50f2d11c629c1bf51b99d6156b3c2004c669)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.4 ([#1269](https://www.github.com/googleapis/java-bigquerystorage/issues/1269)) ([c09c987](https://www.github.com/googleapis/java-bigquerystorage/commit/c09c9877008a2279878edf4e35b0a60e8a59c107)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.0 ([#1276](https://www.github.com/googleapis/java-bigquerystorage/issues/1276)) ([7254177](https://www.github.com/googleapis/java-bigquerystorage/commit/725417743876a766e8cd17b1f557540984c5d49b)) + +## [2.1.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.0.4...v2.1.0) (2021-08-24) + + +### Features + +* retry certain RESOURCE_EXHAUSTED errors observed during ReadRows and report retry attempts ([#1257](https://www.github.com/googleapis/java-bigquerystorage/issues/1257)) ([d56e1ca](https://www.github.com/googleapis/java-bigquerystorage/commit/d56e1caf91297d7c2e1e4a9ce1463c04e44619c0)) + + +### Documentation + +* **sample:** Remove `client` from `JsonStreamWriter` in `WriteCommittedStream` ([#1248](https://www.github.com/googleapis/java-bigquerystorage/issues/1248)) ([6d38bd5](https://www.github.com/googleapis/java-bigquerystorage/commit/6d38bd5e3ff383e55e852081bbea5807796f59dd)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.1.0 ([#1261](https://www.github.com/googleapis/java-bigquerystorage/issues/1261)) ([0edb25d](https://www.github.com/googleapis/java-bigquerystorage/commit/0edb25d4a55f5480d5717672f30b09e6433483b9)) + +### [2.0.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.0.3...v2.0.4) (2021-08-19) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-bigquery to 
v2-rev20210813-1.32.1 ([#1253](https://www.github.com/googleapis/java-bigquerystorage/issues/1253)) ([b25c960](https://www.github.com/googleapis/java-bigquerystorage/commit/b25c96083b7a2910b5e3cfc92b5d77408e53ee66)) + +### [2.0.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.0.2...v2.0.3) (2021-08-19) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-bigquery to v2-rev20210727-1.32.1 ([#1237](https://www.github.com/googleapis/java-bigquerystorage/issues/1237)) ([29e5204](https://www.github.com/googleapis/java-bigquerystorage/commit/29e52041a4d47cefbd92f51e4bf13cb160bd76da)) +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.2 ([#1239](https://www.github.com/googleapis/java-bigquerystorage/issues/1239)) ([5934277](https://www.github.com/googleapis/java-bigquerystorage/commit/593427798a13c6375db26f5656e7956e3dc10164)) + +### [2.0.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.0.1...v2.0.2) (2021-08-12) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v2.1.0 ([#1232](https://www.github.com/googleapis/java-bigquerystorage/issues/1232)) ([cef4fcb](https://www.github.com/googleapis/java-bigquerystorage/commit/cef4fcb6297dc94252ccb323205c34da7435a778)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.0.1 ([#1233](https://www.github.com/googleapis/java-bigquerystorage/issues/1233)) ([27169f0](https://www.github.com/googleapis/java-bigquerystorage/commit/27169f016ac8302b465a24e92995a54831197064)) + +### [2.0.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v2.0.0...v2.0.1) (2021-08-10) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.137.2 ([#1224](https://www.github.com/googleapis/java-bigquerystorage/issues/1224)) ([d8a312a](https://www.github.com/googleapis/java-bigquerystorage/commit/d8a312afdba4e2f81e25f9263d27e0c97f725f75)) +* update dependency com.google.cloud:google-cloud-bigquery to v2 ([#1226](https://www.github.com/googleapis/java-bigquerystorage/issues/1226)) ([11c811e](https://www.github.com/googleapis/java-bigquerystorage/commit/11c811ecd44e375e4414578b666dc56d571e0502)) + +## [2.0.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.23.2...v2.0.0) (2021-08-05) + + +### ⚠ BREAKING CHANGES + +* Java 8 breaking generator changes (#1222) + +### Features + +* Java 8 breaking generator changes ([#1222](https://www.github.com/googleapis/java-bigquerystorage/issues/1222)) ([ac9103c](https://www.github.com/googleapis/java-bigquerystorage/commit/ac9103c18b96933cb724ae9c96a46005bae8160d)) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-bigquery to v2-rev20210726-1.32.1 ([#1217](https://www.github.com/googleapis/java-bigquerystorage/issues/1217)) ([2ae3d90](https://www.github.com/googleapis/java-bigquerystorage/commit/2ae3d90d25674e8520bf02c84b16efc7f40bfa83)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2 ([#1219](https://www.github.com/googleapis/java-bigquerystorage/issues/1219)) ([a83da41](https://www.github.com/googleapis/java-bigquerystorage/commit/a83da417b1260ab5086243905603152e5f8e0b97)) + +### [1.23.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.23.1...v1.23.2) (2021-07-29) + + +### Dependencies + +* update arrow.version to v5 ([#1203](https://www.github.com/googleapis/java-bigquerystorage/issues/1203)) 
([d56906e](https://www.github.com/googleapis/java-bigquerystorage/commit/d56906e10cc8109987358bcd18d26ac41a0cbdd9)) + +### [1.23.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.23.0...v1.23.1) (2021-07-22) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.137.1 ([#1190](https://www.github.com/googleapis/java-bigquerystorage/issues/1190)) ([116460f](https://www.github.com/googleapis/java-bigquerystorage/commit/116460f722674bbc4b654da179885194dbe37e1b)) + +## [1.23.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.8...v1.23.0) (2021-07-14) + + +### Features + +* Expand bytes field type handling ([#1180](https://www.github.com/googleapis/java-bigquerystorage/issues/1180)) ([aab33a5](https://www.github.com/googleapis/java-bigquerystorage/commit/aab33a55f3cb91003363ada1395f07868bc17ee5)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.136.0 ([#1176](https://www.github.com/googleapis/java-bigquerystorage/issues/1176)) ([95c77e2](https://www.github.com/googleapis/java-bigquerystorage/commit/95c77e24f2ed8319b0e08194f331a59e975369cc)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.137.0 ([#1184](https://www.github.com/googleapis/java-bigquerystorage/issues/1184)) ([8bf328c](https://www.github.com/googleapis/java-bigquerystorage/commit/8bf328c1e939b46b297c65359433bed8fbda03b2)) + +### [1.22.8](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.7...v1.22.8) (2021-07-07) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-bigquery to v2-rev20210617-1.32.1 ([#1168](https://www.github.com/googleapis/java-bigquerystorage/issues/1168)) ([61d52f0](https://www.github.com/googleapis/java-bigquerystorage/commit/61d52f02a91b698e2996b491f2da7eeba1dd7484)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.135.3 ([#1167](https://www.github.com/googleapis/java-bigquerystorage/issues/1167)) ([14bdcc4](https://www.github.com/googleapis/java-bigquerystorage/commit/14bdcc4809cb5cb39cc063fd8196e1c8a141c7fb)) + +### [1.22.7](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.6...v1.22.7) (2021-07-01) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.135.2 ([#1163](https://www.github.com/googleapis/java-bigquerystorage/issues/1163)) ([9c11919](https://www.github.com/googleapis/java-bigquerystorage/commit/9c119195a4f64f8d737f71b47d7d2579d014dfd7)) + +### [1.22.6](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.5...v1.22.6) (2021-06-30) + + +### Documentation + +* **sample:** add IT for WriteToDefaultStream ([#1158](https://www.github.com/googleapis/java-bigquerystorage/issues/1158)) ([3280d7a](https://www.github.com/googleapis/java-bigquerystorage/commit/3280d7a0495a25f2509c9d4719439253e0fbaa5d)), closes [#1156](https://www.github.com/googleapis/java-bigquerystorage/issues/1156) +* user-provided streamOrTableName for default write streams ([#1155](https://www.github.com/googleapis/java-bigquerystorage/issues/1155)) ([fc4a5c0](https://www.github.com/googleapis/java-bigquerystorage/commit/fc4a5c061f51d83e41109ebf04f4202276410e30)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.135.1 ([#1154](https://www.github.com/googleapis/java-bigquerystorage/issues/1154)) ([99f8ebb](https://www.github.com/googleapis/java-bigquerystorage/commit/99f8ebb2fde1fd86315a03dea5719e7a7744d081)) +* update dependency 
com.google.cloud:google-cloud-shared-dependencies to v1.4.0 ([#1159](https://www.github.com/googleapis/java-bigquerystorage/issues/1159)) ([7e1b206](https://www.github.com/googleapis/java-bigquerystorage/commit/7e1b2062f8cb13c05270d86c9ff6863934e7478d)) + +### [1.22.5](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.4...v1.22.5) (2021-06-28) + + +### Dependencies + +* update dependency com.google.apis:google-api-services-bigquery to v2-rev20210617-1.31.5 ([#1147](https://www.github.com/googleapis/java-bigquerystorage/issues/1147)) ([0f4605f](https://www.github.com/googleapis/java-bigquerystorage/commit/0f4605ff5a5cb2cf8f2b961109c5a03a80508993)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.134.0 ([#1146](https://www.github.com/googleapis/java-bigquerystorage/issues/1146)) ([c7b3ca1](https://www.github.com/googleapis/java-bigquerystorage/commit/c7b3ca1894c7a6a9920887d05e6134cb7a9a0cfb)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.135.0 ([#1149](https://www.github.com/googleapis/java-bigquerystorage/issues/1149)) ([f220560](https://www.github.com/googleapis/java-bigquerystorage/commit/f220560a8c967ff2e09d044e377420d18b504be7)) + +### [1.22.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.3...v1.22.4) (2021-06-24) + + +### Bug Fixes + +* Add `shopt -s nullglob` to dependencies script ([#1139](https://www.github.com/googleapis/java-bigquerystorage/issues/1139)) ([fef0aca](https://www.github.com/googleapis/java-bigquerystorage/commit/fef0acac23548ab521fcd98cde19ca1d022fad2b)) +* Add google-api-services-bigquery to google-cloud-bigquerystorage-bom ([#1142](https://www.github.com/googleapis/java-bigquerystorage/issues/1142)) ([4e97130](https://www.github.com/googleapis/java-bigquerystorage/commit/4e97130dd1f703d0f710f27fceb577cef1b06b93)) +* Update dependencies.sh to not break on mac ([#1134](https://www.github.com/googleapis/java-bigquerystorage/issues/1134)) ([85f8c9a](https://www.github.com/googleapis/java-bigquerystorage/commit/85f8c9af7928325fa3c32ec3896b8e3f87279901)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.133.1 ([#1136](https://www.github.com/googleapis/java-bigquerystorage/issues/1136)) ([7248e45](https://www.github.com/googleapis/java-bigquerystorage/commit/7248e459b7c9cdeac573f4bd03cb618d138a7618)) + +### [1.22.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.2...v1.22.3) (2021-06-11) + + +### Bug Fixes + +* Make EOS Matching more robust ([#1128](https://www.github.com/googleapis/java-bigquerystorage/issues/1128)) ([3b5cdbe](https://www.github.com/googleapis/java-bigquerystorage/commit/3b5cdbe769fcebeebc571922c84c129066e76aaf)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.133.0 ([#1124](https://www.github.com/googleapis/java-bigquerystorage/issues/1124)) ([aea80e9](https://www.github.com/googleapis/java-bigquerystorage/commit/aea80e930ac5a9cb5fa8f751c977166d45f1341f)) + +### [1.22.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.1...v1.22.2) (2021-06-08) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.132.1 ([#1114](https://www.github.com/googleapis/java-bigquerystorage/issues/1114)) ([845078e](https://www.github.com/googleapis/java-bigquerystorage/commit/845078e3f4e8cb273b893a11aaeb61f98120e10e)) + +### [1.22.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.22.0...v1.22.1) (2021-06-05) + + +### Bug 
Fixes + +* call awaitTermination when closing the stream writer ([#1111](https://www.github.com/googleapis/java-bigquerystorage/issues/1111)) ([0f7db50](https://www.github.com/googleapis/java-bigquerystorage/commit/0f7db50bdd8f653f8087550cb2d1eeb47975eadf)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.132.0 ([#1109](https://www.github.com/googleapis/java-bigquerystorage/issues/1109)) ([fc91b02](https://www.github.com/googleapis/java-bigquerystorage/commit/fc91b02a5361b7d80191489405967182445e3b44)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.3.0 ([#1108](https://www.github.com/googleapis/java-bigquerystorage/issues/1108)) ([05a936a](https://www.github.com/googleapis/java-bigquerystorage/commit/05a936a7c34c495010d18c2038d08d2662420958)) + +## [1.22.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.21.1...v1.22.0) (2021-05-31) + + +### ⚠ BREAKING CHANGES + +* remove default deadline for AppendRows API (#1101) + +### Features + +* add `gcf-owl-bot[bot]` to `ignoreAuthors` ([#1092](https://www.github.com/googleapis/java-bigquerystorage/issues/1092)) ([9fe34d1](https://www.github.com/googleapis/java-bigquerystorage/commit/9fe34d1a7ea2b4c1f750fd8805251811a4b0111d)) + + +### Bug Fixes + +* More robust STREAM_RST logic ([#1102](https://www.github.com/googleapis/java-bigquerystorage/issues/1102)) ([dd67534](https://www.github.com/googleapis/java-bigquerystorage/commit/dd675343b20d614eaf715306f1151532a3a2c33f)) +* remove default deadline for AppendRows API ([#1101](https://www.github.com/googleapis/java-bigquerystorage/issues/1101)) ([87cadf0](https://www.github.com/googleapis/java-bigquerystorage/commit/87cadf01edbdaf876699e98f027835d6594f8072)) + + +### Dependencies + +* update arrow.version to v4.0.1 ([#1103](https://www.github.com/googleapis/java-bigquerystorage/issues/1103)) ([b2e3489](https://www.github.com/googleapis/java-bigquerystorage/commit/b2e34894f443075ce375b822babcfc329b34c76c)) +* update dependency com.google.truth:truth to v1.1.3 ([#1100](https://www.github.com/googleapis/java-bigquerystorage/issues/1100)) ([12c401f](https://www.github.com/googleapis/java-bigquerystorage/commit/12c401feec1fb7fbaf39ea7ccffee4a02faffeb3)) + +### [1.21.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.21.0...v1.21.1) (2021-05-19) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.131.1 ([#1087](https://www.github.com/googleapis/java-bigquerystorage/issues/1087)) ([fd418bd](https://www.github.com/googleapis/java-bigquerystorage/commit/fd418bd525e929bb75b340137a95f06ea0977134)) + +## [1.21.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.6...v1.21.0) (2021-05-19) + + +### Features + +* Add ZSTD compression as an option for Arrow proto changes ([d910a89](https://www.github.com/googleapis/java-bigquerystorage/commit/d910a8903d0eed6e8a8a4b7183dc7a0f401f66da)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.131.0 ([#1083](https://www.github.com/googleapis/java-bigquerystorage/issues/1083)) ([70ff6d5](https://www.github.com/googleapis/java-bigquerystorage/commit/70ff6d5ffaaa3ab3a2d6c940403b96ab6f711312)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.2.0 ([#1084](https://www.github.com/googleapis/java-bigquerystorage/issues/1084)) ([99fae33](https://www.github.com/googleapis/java-bigquerystorage/commit/99fae331b2d97ff63b61750d7284cd04f6abc2e6)) + +### 
[1.20.6](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.5...v1.20.6) (2021-05-18) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.130.0 ([#1075](https://www.github.com/googleapis/java-bigquerystorage/issues/1075)) ([4cf3901](https://www.github.com/googleapis/java-bigquerystorage/commit/4cf39012afef9b2211aabbda891ffacba4344fb7)) + +### [1.20.5](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.4...v1.20.5) (2021-05-13) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.129.0 ([#1065](https://www.github.com/googleapis/java-bigquerystorage/issues/1065)) ([b94e01f](https://www.github.com/googleapis/java-bigquerystorage/commit/b94e01f473dd99c34cc942a2f649973ba6325cb2)) + +### [1.20.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.3...v1.20.4) (2021-05-11) + + +### Bug Fixes + +* migrate json writer to use StreamWriterV2 ([#1058](https://www.github.com/googleapis/java-bigquerystorage/issues/1058)) ([586777f](https://www.github.com/googleapis/java-bigquerystorage/commit/586777f289f3dc84d2d6237463f128d278d2465f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.1.0 ([#1061](https://www.github.com/googleapis/java-bigquerystorage/issues/1061)) ([d6c3146](https://www.github.com/googleapis/java-bigquerystorage/commit/d6c3146d21282d1e0724583f68a31184d2d7167b)) + +### [1.20.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.2...v1.20.3) (2021-05-04) + + +### Bug Fixes + +* remove schema update capability from jsonwriter and delete related tests ([#1047](https://www.github.com/googleapis/java-bigquerystorage/issues/1047)) ([21e399b](https://www.github.com/googleapis/java-bigquerystorage/commit/21e399bf4fca9b3ef84443ae1d32a6bfe393b61b)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.128.3 ([#1045](https://www.github.com/googleapis/java-bigquerystorage/issues/1045)) ([68beb48](https://www.github.com/googleapis/java-bigquerystorage/commit/68beb4896e5246389c216841dec519a9915362c3)) + +### [1.20.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.1...v1.20.2) (2021-04-27) + + +### Dependencies + +* update arrow.version to v4 ([#1035](https://www.github.com/googleapis/java-bigquerystorage/issues/1035)) ([8e2225c](https://www.github.com/googleapis/java-bigquerystorage/commit/8e2225cf791a17deb674b12586e3ea041358d49c)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.128.1 ([#1033](https://www.github.com/googleapis/java-bigquerystorage/issues/1033)) ([776c670](https://www.github.com/googleapis/java-bigquerystorage/commit/776c670267d2c9bbc730c41f8187ac853b8525d3)) + +### [1.20.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.20.0...v1.20.1) (2021-04-24) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1 ([#1030](https://www.github.com/googleapis/java-bigquerystorage/issues/1030)) ([7a00340](https://www.github.com/googleapis/java-bigquerystorage/commit/7a003405cfe301f3729cb373fe7f02da12506369)) + +## [1.20.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.19.0...v1.20.0) (2021-04-20) + + +### Features + +* new JSON type through BigQuery Write ([#1013](https://www.github.com/googleapis/java-bigquerystorage/issues/1013)) ([357e811](https://www.github.com/googleapis/java-bigquerystorage/commit/357e811f5d429cacfce597be9a3e739e659f604a)) + 
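+
+The v1.20.0 entry above introduces the new JSON column type through BigQuery Write. As orientation for the Write API surface these notes track, here is a small, hypothetical sketch (shown against the later `v1` package for brevity; project, dataset, and table names are placeholders) that creates an explicit COMMITTED write stream:
+
+```java
+import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
+import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest;
+import com.google.cloud.bigquery.storage.v1.TableName;
+import com.google.cloud.bigquery.storage.v1.WriteStream;
+
+public class CreateCommittedStreamSketch {
+  public static void main(String[] args) throws Exception {
+    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+      CreateWriteStreamRequest request =
+          CreateWriteStreamRequest.newBuilder()
+              .setParent(TableName.of("my-project", "my_dataset", "my_table").toString())
+              .setWriteStream(
+                  WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+              .build();
+      // Rows appended to a COMMITTED stream become visible as soon as the
+      // server acknowledges them; no finalize/commit step is needed.
+      WriteStream stream = client.createWriteStream(request);
+      System.out.println("Created stream: " + stream.getName());
+    }
+  }
+}
+```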
+ +### Bug Fixes + +* It seems GAPIC is not generating expected headers for the bidi streaming client lib; apply a temp fix to unblock customers ([#1017](https://www.github.com/googleapis/java-bigquerystorage/issues/1017)) ([9c1ed55](https://www.github.com/googleapis/java-bigquerystorage/commit/9c1ed556c7e3a545932ec8ceff7b3c466e72d84f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.1 ([#1018](https://www.github.com/googleapis/java-bigquerystorage/issues/1018)) ([827764e](https://www.github.com/googleapis/java-bigquerystorage/commit/827764ef215bdb73ccff0a2b9329c2295e5eef1f)) + +## [1.19.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.18.1...v1.19.0) (2021-04-15) + + +### ⚠ BREAKING CHANGES + +* remove v1alpha2 library (#1003) + +### Bug Fixes + +* keep release scripts from issuing overlapping phases ([#1006](https://www.github.com/googleapis/java-bigquerystorage/issues/1006)) ([f338201](https://www.github.com/googleapis/java-bigquerystorage/commit/f3382013fa4e14eba89ecbc32bdad813b2880254)) + + +### Code Refactoring + +* remove v1alpha2 library ([#1003](https://www.github.com/googleapis/java-bigquerystorage/issues/1003)) ([98cd924](https://www.github.com/googleapis/java-bigquerystorage/commit/98cd924e335441f633ad87a0aed118de6951260c)) + +### [1.18.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.18.0...v1.18.1) (2021-04-10) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.128.0 ([#993](https://www.github.com/googleapis/java-bigquerystorage/issues/993)) ([42989b5](https://www.github.com/googleapis/java-bigquerystorage/commit/42989b55b2fa9e2f2ce0a41092c9c151fb529ed6)) + +## [1.18.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.17.0...v1.18.0) (2021-04-09) + + +### Features + +* add deprecated annotation and tag to bq storage alpha api ([#978](https://www.github.com/googleapis/java-bigquerystorage/issues/978)) ([c7da342](https://www.github.com/googleapis/java-bigquerystorage/commit/c7da34252ee8c243be3ce737d03e1e12f10a5eba)) +* re-generated to pick up changes from googleapis. 
([#982](https://www.github.com/googleapis/java-bigquerystorage/issues/982)) ([17bfbd8](https://www.github.com/googleapis/java-bigquerystorage/commit/17bfbd8dffd854356ea503adf3a6e065f1e1a4ee)) + + +### Bug Fixes + +* Cleanup JsonWriter bytes conversion code and add some test coverage ([#984](https://www.github.com/googleapis/java-bigquerystorage/issues/984)) ([e43df34](https://www.github.com/googleapis/java-bigquerystorage/commit/e43df3403c4d7644032cef6e1f1cf59e6ec1b5eb)) + + +### Documentation + +* update region tag for the storage quickstart ([#985](https://www.github.com/googleapis/java-bigquerystorage/issues/985)) ([1d0d6c3](https://www.github.com/googleapis/java-bigquerystorage/commit/1d0d6c3a877943b83775430d5a2bf7fcd24a1f21)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.12 ([#986](https://www.github.com/googleapis/java-bigquerystorage/issues/986)) ([d39bd79](https://www.github.com/googleapis/java-bigquerystorage/commit/d39bd79a69e987baa7721b907cdccb59d1ea4a74)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.0 ([#987](https://www.github.com/googleapis/java-bigquerystorage/issues/987)) ([4fa3dbb](https://www.github.com/googleapis/java-bigquerystorage/commit/4fa3dbba64586f6bf943ba67f225fad1b994e5a7)) + +## [1.17.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.16.1...v1.17.0) (2021-03-30) + + +### Features + +* add Arrow compression options (only LZ4 for now) ([#972](https://www.github.com/googleapis/java-bigquerystorage/issues/972)) ([de0c0d2](https://www.github.com/googleapis/java-bigquerystorage/commit/de0c0d2c0a5e8b85a0f2aa5679a22fb8846a5fd0)) +* BigDecimal and ByteString encoding ([#971](https://www.github.com/googleapis/java-bigquerystorage/issues/971)) ([82b556e](https://www.github.com/googleapis/java-bigquerystorage/commit/82b556e08d19a4dd969bda53409276c6408a4126)) + +### [1.16.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.16.0...v1.16.1) (2021-03-26) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.11 ([#962](https://www.github.com/googleapis/java-bigquerystorage/issues/962)) ([87a821e](https://www.github.com/googleapis/java-bigquerystorage/commit/87a821ec7a6d6cf4fd2a214c1bbdc3691351ba61)) + +## [1.16.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.15.1...v1.16.0) (2021-03-25) + + +### Features + +* Add CivilTimeEncoder to encode and decode DateTime/Time as numerics ([#937](https://www.github.com/googleapis/java-bigquerystorage/issues/937)) ([969b429](https://www.github.com/googleapis/java-bigquerystorage/commit/969b4290b9934b94b1a0113e04e37ff44b2a536e)) + + +### Bug Fixes + +* add a deprecation message on StreamWriter ([#922](https://www.github.com/googleapis/java-bigquerystorage/issues/922)) ([fce5289](https://www.github.com/googleapis/java-bigquerystorage/commit/fce52890c6948a9b78a62d2fe0e4f9768d10d401)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.10 ([#955](https://www.github.com/googleapis/java-bigquerystorage/issues/955)) ([c810c72](https://www.github.com/googleapis/java-bigquerystorage/commit/c810c7279bfbad31cb0f94f5ad5d4a74342d4481)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.9 ([#947](https://www.github.com/googleapis/java-bigquerystorage/issues/947)) ([d781dc5](https://www.github.com/googleapis/java-bigquerystorage/commit/d781dc5479602fee01eb971033978317e5669694))
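+
+The CivilTimeEncoder noted under v1.16.0 above packs civil DATETIME/TIME values into single integers. A self-contained sketch of the general idea follows; the bit layout here is illustrative only and not necessarily the exact layout the library uses:
+
+```java
+import java.time.LocalTime;
+
+public class PackedTimeSketch {
+  // Assumed layout: hour<<32 | minute<<26 | second<<20 | microseconds.
+  static long encodePackedTimeMicros(LocalTime t) {
+    long micros = t.getNano() / 1_000L; // fits in 20 bits (max 999,999)
+    return ((long) t.getHour() << 32)
+        | ((long) t.getMinute() << 26)
+        | ((long) t.getSecond() << 20)
+        | micros;
+  }
+
+  static LocalTime decodePackedTimeMicros(long packed) {
+    int hour = (int) (packed >>> 32) & 0x1F;
+    int minute = (int) (packed >>> 26) & 0x3F;
+    int second = (int) (packed >>> 20) & 0x3F;
+    int nanos = (int) (packed & 0xFFFFF) * 1_000;
+    return LocalTime.of(hour, minute, second, nanos);
+  }
+
+  public static void main(String[] args) {
+    long packed = encodePackedTimeMicros(LocalTime.of(12, 34, 56, 789_000_000));
+    System.out.println(decodePackedTimeMicros(packed)); // 12:34:56.789
+  }
+}
+```
+
+
+### Documentation
+
+* **samples:** 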
Check for error from BatchCommitWriteStreams ([#940](https://www.github.com/googleapis/java-bigquerystorage/issues/940)) ([ab3c145](https://www.github.com/googleapis/java-bigquerystorage/commit/ab3c1453d3c1fb627e773d0e7ca4ec991f8d38b7)) + +### [1.15.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.15.0...v1.15.1) (2021-03-17) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.8 ([#934](https://www.github.com/googleapis/java-bigquerystorage/issues/934)) ([626ae23](https://www.github.com/googleapis/java-bigquerystorage/commit/626ae2338a08aa9933badcd32d926cd51515e1e5)) +* update dependency org.apache.avro:avro to v1.10.2 ([#942](https://www.github.com/googleapis/java-bigquerystorage/issues/942)) ([1554247](https://www.github.com/googleapis/java-bigquerystorage/commit/1554247cf55aa56281a530c721ab1650699a3efc)) + +## [1.15.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.14.0...v1.15.0) (2021-03-09) + + +### ⚠ BREAKING CHANGES + +* remove deprecated append method in StreamWriterV2 (#924) + +### Features + +* remove deprecated append method in StreamWriterV2 ([#924](https://www.github.com/googleapis/java-bigquerystorage/issues/924)) ([f278775](https://www.github.com/googleapis/java-bigquerystorage/commit/f2787751bf03d91d1575ee6e007f407f91d97157)) + + +### Bug Fixes + +* Revive schema update e2e test and adjust some test names ([#921](https://www.github.com/googleapis/java-bigquerystorage/issues/921)) ([dd392e5](https://www.github.com/googleapis/java-bigquerystorage/commit/dd392e54953e0b75e780532a4dab2d143b8d8665)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.1 ([#931](https://www.github.com/googleapis/java-bigquerystorage/issues/931)) ([8c716c5](https://www.github.com/googleapis/java-bigquerystorage/commit/8c716c5d26420ef03f76a302c76892894045d4ad)) + +## [1.14.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.13.0...v1.14.0) (2021-03-04) + + +### Features + +* update StreamWriterV2 to support trace id ([#895](https://www.github.com/googleapis/java-bigquerystorage/issues/895)) ([2e49ce8](https://www.github.com/googleapis/java-bigquerystorage/commit/2e49ce8c79cb059840c3307898ba16980f6892fa)) + + +### Bug Fixes + +* add schema update back to json writer ([#905](https://www.github.com/googleapis/java-bigquerystorage/issues/905)) ([a2adbf8](https://www.github.com/googleapis/java-bigquerystorage/commit/a2adbf80753161cbddd23d5a7db75e9250db58fa)) +* Add unit test for concurrent issues we worried about, and fix some locking issues ([#854](https://www.github.com/googleapis/java-bigquerystorage/issues/854)) ([0870797](https://www.github.com/googleapis/java-bigquerystorage/commit/087079728195e20f93701e8d5e1e59ba29a7d21b)) +* test failure testAppendWhileShutdownSuccess ([#904](https://www.github.com/googleapis/java-bigquerystorage/issues/904)) ([b80183e](https://www.github.com/googleapis/java-bigquerystorage/commit/b80183ea23c8b78611a42d22d8c62a4ba4904a80)) +* testAppendWhileShutdownSuccess race ([#907](https://www.github.com/googleapis/java-bigquerystorage/issues/907)) ([d39443d](https://www.github.com/googleapis/java-bigquerystorage/commit/d39443d51d2625e4b3aee59d1e593229e9e449d3)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.6 ([#909](https://www.github.com/googleapis/java-bigquerystorage/issues/909)) 
([505938b](https://www.github.com/googleapis/java-bigquerystorage/commit/505938bcba5a4a7af9e618572bbc41f365702f47)) + +## [1.13.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.12.0...v1.13.0) (2021-03-01) + + +### Features + +* **generator:** update protoc to v3.15.3 ([#898](https://www.github.com/googleapis/java-bigquerystorage/issues/898)) ([2f277d6](https://www.github.com/googleapis/java-bigquerystorage/commit/2f277d650e8f617c6253843baf73d5d220713a61)) +* in StreamWriterV2, support a new append that takes rows and an offset ([#894](https://www.github.com/googleapis/java-bigquerystorage/issues/894)) ([f3865b0](https://www.github.com/googleapis/java-bigquerystorage/commit/f3865b06ea7c61e95d3ee9bc7b46857d9d3080cc)) +* StreamWriterV2 will handle schema/streamName attachment ([#877](https://www.github.com/googleapis/java-bigquerystorage/issues/877)) ([c54bcfe](https://www.github.com/googleapis/java-bigquerystorage/commit/c54bcfec1706eef58eaf9dad8b49dc79fc8da133)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.5 ([#896](https://www.github.com/googleapis/java-bigquerystorage/issues/896)) ([d211c76](https://www.github.com/googleapis/java-bigquerystorage/commit/d211c76dff747121d4560b55818c10bf595ef1c3)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.0 ([#892](https://www.github.com/googleapis/java-bigquerystorage/issues/892)) ([438f1c3](https://www.github.com/googleapis/java-bigquerystorage/commit/438f1c3b551e6b97a3241c69f2006a5a6be78c4f)) + +## [1.12.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.11.0...v1.12.0) (2021-02-25) + + +### Features + +* StreamWriterV2 sets exception for response with error ([#884](https://www.github.com/googleapis/java-bigquerystorage/issues/884)) ([4677d7b](https://www.github.com/googleapis/java-bigquerystorage/commit/4677d7bd56db6c76106daeb7be38fa65f1c9c745)) + + +### Documentation + +* **sample:** Update parallel append sample to use StreamWriterV2 ([#883](https://www.github.com/googleapis/java-bigquerystorage/issues/883)) ([5c5c690](https://www.github.com/googleapis/java-bigquerystorage/commit/5c5c690fe39b3055596712dbd39064aed0b023da)) + +## [1.11.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.10.0...v1.11.0) (2021-02-24) + + +### Features + +* Add max size check to StreamWriterV2 ([#873](https://www.github.com/googleapis/java-bigquerystorage/issues/873)) ([0261af4](https://www.github.com/googleapis/java-bigquerystorage/commit/0261af4e2dff5fd8515109c6336796815acb6c3c)) +* Support building a BigQueryWriteClient within the StreamWriterV2 ([#876](https://www.github.com/googleapis/java-bigquerystorage/issues/876)) ([237c827](https://www.github.com/googleapis/java-bigquerystorage/commit/237c82711649672de1ce2e7382b909b74c0cd709)) +* Support inflight control in StreamWriterV2 ([#875](https://www.github.com/googleapis/java-bigquerystorage/issues/875)) ([854c81e](https://www.github.com/googleapis/java-bigquerystorage/commit/854c81e20c3651295fd2dd5c9e87f48c67a9eeff)) + + +### Documentation + +* **samples:** add a new sample for parallel append ([#863](https://www.github.com/googleapis/java-bigquerystorage/issues/863)) ([ed1c265](https://www.github.com/googleapis/java-bigquerystorage/commit/ed1c26588ceb3e0d852fabefe8b3a89b8c74e6dd))
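+
+The StreamWriterV2 entries above (an offset-taking append, schema/streamName attachment, inflight control) describe the surface that survives in today's `StreamWriter`. A minimal sketch follows, with method names taken as assumptions from the current v1 surface and the stream name and schema left as caller-provided placeholders:
+
+```java
+import com.google.api.core.ApiFuture;
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ProtoRows;
+import com.google.cloud.bigquery.storage.v1.ProtoSchema;
+import com.google.cloud.bigquery.storage.v1.StreamWriter;
+
+public class AppendWithOffsetSketch {
+  // rows: pre-serialized proto rows; schema: the writer schema for the stream.
+  static ApiFuture<AppendRowsResponse> appendAt(
+      String streamName, ProtoSchema schema, ProtoRows rows, long offset) throws Exception {
+    // The writer attaches the schema and stream name on the first request of
+    // the connection, per the v1.13.0 note above. (Sketch omits close().)
+    StreamWriter writer = StreamWriter.newBuilder(streamName).setWriterSchema(schema).build();
+    // Offset-based appends let the server deduplicate retried requests.
+    return writer.append(rows, offset);
+  }
+}
+```
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v1.127.4 ([#869](https://www.github.com/googleapis/java-bigquerystorage/issues/869)) 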
([de747ec](https://www.github.com/googleapis/java-bigquerystorage/commit/de747ec971f28919438b14c9f77ec7a2fa3a5628)) + +## [1.10.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.9.1...v1.10.0) (2021-02-23) + + +### Features + +* Create Stream writer v2 - starting with core logic ([#867](https://www.github.com/googleapis/java-bigquerystorage/issues/867)) ([7c01f45](https://www.github.com/googleapis/java-bigquerystorage/commit/7c01f45ded1c516c23954b88103ca80b2132ecfc)) + + +### Bug Fixes + +* Remove flushAll method ([#850](https://www.github.com/googleapis/java-bigquerystorage/issues/850)) ([33a4502](https://www.github.com/googleapis/java-bigquerystorage/commit/33a450286b999c41459f92dd0177239f2a1b1f9a)) +* temporarily disable refreshAppend ([#853](https://www.github.com/googleapis/java-bigquerystorage/issues/853)) ([baf973d](https://www.github.com/googleapis/java-bigquerystorage/commit/baf973d84577cd490e275f6eebf91e25d5c34ccc)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.3 ([#861](https://www.github.com/googleapis/java-bigquerystorage/issues/861)) ([da95d18](https://www.github.com/googleapis/java-bigquerystorage/commit/da95d18a265950de437386870e5f1dba3be75743)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.19.0 ([#862](https://www.github.com/googleapis/java-bigquerystorage/issues/862)) ([4e390d7](https://www.github.com/googleapis/java-bigquerystorage/commit/4e390d7f5ca80ce87bfc0be3ce4ef4a33f9735ab)) + +### [1.9.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.9.0...v1.9.1) (2021-02-18) + + +### Bug Fixes + +* remove reconnection feature from client library ([#849](https://www.github.com/googleapis/java-bigquerystorage/issues/849)) ([86dfc3a](https://www.github.com/googleapis/java-bigquerystorage/commit/86dfc3a1b1e3429a1e0932919a300a3bbbcb1ceb)) + +## [1.9.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.8.5...v1.9.0) (2021-02-17) + + +### Features + +* generate sample code in the Java microgenerator ([#821](https://www.github.com/googleapis/java-bigquerystorage/issues/821)) ([3ab9a30](https://www.github.com/googleapis/java-bigquerystorage/commit/3ab9a309441a681e8adec88f8b8af1298a89cd74)) + + +### Bug Fixes + +* add dataflow trace id support ([#827](https://www.github.com/googleapis/java-bigquerystorage/issues/827)) ([8d22c58](https://www.github.com/googleapis/java-bigquerystorage/commit/8d22c586ed8a265558b84619297f5bd5b5ec4cb4)) +* shutdown stuck when there is an error on the flush path ([#831](https://www.github.com/googleapis/java-bigquerystorage/issues/831)) ([c2fd750](https://www.github.com/googleapis/java-bigquerystorage/commit/c2fd750e1309fb7b6eb862dea1ad8546dcd78bef)) +* update repo name ([#818](https://www.github.com/googleapis/java-bigquerystorage/issues/818)) ([ba4b1a2](https://www.github.com/googleapis/java-bigquerystorage/commit/ba4b1a2d8eaa8f5408e476a3b570cb508aa94f57)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.0 ([#825](https://www.github.com/googleapis/java-bigquerystorage/issues/825)) ([36322fb](https://www.github.com/googleapis/java-bigquerystorage/commit/36322fb1f3d3d286cf8fc6b66f5aa1b97e754d72)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.127.1 ([#828](https://www.github.com/googleapis/java-bigquerystorage/issues/828)) ([c24a6ae](https://www.github.com/googleapis/java-bigquerystorage/commit/c24a6ae3d5a091b5276036d1d7f01e2777bbfd82)) +* update dependency 
com.google.cloud:google-cloud-bigquery to v1.127.2 ([#830](https://www.github.com/googleapis/java-bigquerystorage/issues/830)) ([94c7848](https://www.github.com/googleapis/java-bigquerystorage/commit/94c78485baa383cd41ee7001206d5bc19f7746bf)) +* update dependency junit:junit to v4.13.2 ([#829](https://www.github.com/googleapis/java-bigquerystorage/issues/829)) ([c2e429a](https://www.github.com/googleapis/java-bigquerystorage/commit/c2e429a591131cd2c89982c746f860a8fd0c7aef)) + +### [1.8.5](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.8.4...v1.8.5) (2021-01-17) + + +### Bug Fixes + +* StreamWriter hang when we reach the inflight limit control and is doing a retry ([#799](https://www.github.com/googleapis/java-bigquerystorage/issues/799)) ([f8f9770](https://www.github.com/googleapis/java-bigquerystorage/commit/f8f97701e5ca698a170a1d3b6ecb3886e186f9d5)) + +### [1.8.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.8.3...v1.8.4) (2021-01-14) + + +### Bug Fixes + +* default stream integration test failures due to production expected change ([#791](https://www.github.com/googleapis/java-bigquerystorage/issues/791)) ([1c2b5c1](https://www.github.com/googleapis/java-bigquerystorage/commit/1c2b5c1ef478305fe7f3d9f1843750cec18ba9f8)) + + +### Documentation + +* **samples:** jsonstreamwriter samples ([#756](https://www.github.com/googleapis/java-bigquerystorage/issues/756)) ([929b2ce](https://www.github.com/googleapis/java-bigquerystorage/commit/929b2cea1951bbe45eea596163f9a7a74d0ab041)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.126.6 ([#794](https://www.github.com/googleapis/java-bigquerystorage/issues/794)) ([8e68546](https://www.github.com/googleapis/java-bigquerystorage/commit/8e68546f1e86553919766f9333ad911ba7da8442)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.18.0 ([#795](https://www.github.com/googleapis/java-bigquerystorage/issues/795)) ([86036bb](https://www.github.com/googleapis/java-bigquerystorage/commit/86036bb5caca125b38a64bd63acc5486a87b8e35)) +* update protobuf ([#790](https://www.github.com/googleapis/java-bigquerystorage/issues/790)) ([792e925](https://www.github.com/googleapis/java-bigquerystorage/commit/792e925840e99033a1f194b2bfb372dae79d3d0d)) + +### [1.8.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.8.2...v1.8.3) (2021-01-12) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.126.4 ([#782](https://www.github.com/googleapis/java-bigquerystorage/issues/782)) ([ced40d0](https://www.github.com/googleapis/java-bigquerystorage/commit/ced40d09ed6f84ab86f83bec009b6d29f8d65358)) + +### [1.8.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.8.1...v1.8.2) (2021-01-12) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.1 ([#778](https://www.github.com/googleapis/java-bigquerystorage/issues/778)) ([b4099d0](https://www.github.com/googleapis/java-bigquerystorage/commit/b4099d0bcd80a650d491996b3dc3def81bdd97ef)) + +### [1.8.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.8.0...v1.8.1) (2021-01-11) + + +### Dependencies + +* update dependency com.fasterxml.jackson.core:jackson-core to v2.12.1 ([#770](https://www.github.com/googleapis/java-bigquerystorage/issues/770)) ([2f276bd](https://www.github.com/googleapis/java-bigquerystorage/commit/2f276bdf254a06f38a87d9ab93bd73334dd19927)) + +## 
[1.8.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.7.2...v1.8.0) (2021-01-05) + + +### Features + +* add default stream support for StreamWriter ([#744](https://www.github.com/googleapis/java-bigquerystorage/issues/744)) ([248ab73](https://www.github.com/googleapis/java-bigquerystorage/commit/248ab73eebb9feda94e7e29de591a70c91a064c1)) +* better default stream support in client library ([#750](https://www.github.com/googleapis/java-bigquerystorage/issues/750)) ([488f258](https://www.github.com/googleapis/java-bigquerystorage/commit/488f2589a793ef9efcab64a9bed9af05f5b1620d)), closes [#749](https://www.github.com/googleapis/java-bigquerystorage/issues/749) [#746](https://www.github.com/googleapis/java-bigquerystorage/issues/746) +* remove IgnoreUnknownFields support on JsonStreamWriter ([#757](https://www.github.com/googleapis/java-bigquerystorage/issues/757)) ([0988105](https://www.github.com/googleapis/java-bigquerystorage/commit/09881051a98f7d1675c3ec0850ef36dbe2ffa481)) +* updates to Write API v1beta2 public interface, migrate to Java microgenerator ([#728](https://www.github.com/googleapis/java-bigquerystorage/issues/728)) ([2fc5968](https://www.github.com/googleapis/java-bigquerystorage/commit/2fc59689dfe64be6c23104493f690f476a0cda7f)) + + +### Bug Fixes + +* An extra test that is still calling old JsonStreamWriter append ([#760](https://www.github.com/googleapis/java-bigquerystorage/issues/760)) ([af72e6e](https://www.github.com/googleapis/java-bigquerystorage/commit/af72e6e9b4100e4f47fad4139437a04aa3ee7535)) +* enable tests that are disabled due to breaking change and stop ignoring ALREADY_EXISTED error ([#748](https://www.github.com/googleapis/java-bigquerystorage/issues/748)) ([8caf5a2](https://www.github.com/googleapis/java-bigquerystorage/commit/8caf5a26baf865c55405363f9d78756bce2d5219)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.126.3 ([#739](https://www.github.com/googleapis/java-bigquerystorage/issues/739)) ([29f2bac](https://www.github.com/googleapis/java-bigquerystorage/commit/29f2bacb49e977d2e2007123c0c935add264cbc0)) + +### [1.7.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.7.1...v1.7.2) (2020-12-15) + + +### Bug Fixes + +* Disable Breaking change related code site ([#731](https://www.github.com/googleapis/java-bigquerystorage/issues/731)) ([d180293](https://www.github.com/googleapis/java-bigquerystorage/commit/d180293b47484c6257ae065f7ea2a5a6be8a5383)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.0 ([#735](https://www.github.com/googleapis/java-bigquerystorage/issues/735)) ([131d7a7](https://www.github.com/googleapis/java-bigquerystorage/commit/131d7a7626adebf9a0c97ffb2b0183a55086b5f7)) + +### [1.7.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.7.0...v1.7.1) (2020-12-14) + + +### Bug Fixes + +* a build break caused by breaking change. 
([#729](https://www.github.com/googleapis/java-bigquerystorage/issues/729)) ([a1b1c8e](https://www.github.com/googleapis/java-bigquerystorage/commit/a1b1c8edce87a8d33faf1067f617e3f47c4afc45))
+* A test race in JsonStreamWriterTest ([#722](https://www.github.com/googleapis/java-bigquerystorage/issues/722)) ([35fe606](https://www.github.com/googleapis/java-bigquerystorage/commit/35fe606e43a47dfe844666bf3e385f9c2e582977))
+* Add special type tests for the Json writer and fix some type mapping issues ([#725](https://www.github.com/googleapis/java-bigquerystorage/issues/725)) ([ab6213c](https://www.github.com/googleapis/java-bigquerystorage/commit/ab6213c244162c74242d3aaf543bfcf9b2eb4405))
+* Temporarily disable test/code where the breaking change is used, to help push out the breaking change in the unreleased Beta ([#727](https://www.github.com/googleapis/java-bigquerystorage/issues/727)) ([38c95c2](https://www.github.com/googleapis/java-bigquerystorage/commit/38c95c23459eb9c4a7215b1a3cb5243165f79815))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.1 ([d70bc3e](https://www.github.com/googleapis/java-bigquerystorage/commit/d70bc3e211ec579129ba53679572b49187f6b6e5))
+
+## [1.7.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.7...v1.7.0) (2020-12-09)
+
+
+### Features
+
+* v1beta2 manual client library for BigQueryWrite ([#714](https://www.github.com/googleapis/java-bigquerystorage/issues/714)) ([53a9c3a](https://www.github.com/googleapis/java-bigquerystorage/commit/53a9c3aaf7e6a17d10ceecaf9cef5eb3e81a58ee))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v1.126.0 ([#708](https://www.github.com/googleapis/java-bigquerystorage/issues/708)) ([24a98c8](https://www.github.com/googleapis/java-bigquerystorage/commit/24a98c88b5531210efa8ba7b474f89d9b790df16))
+* update dependency com.google.cloud:google-cloud-bigquery to v1.126.1 ([#716](https://www.github.com/googleapis/java-bigquerystorage/issues/716)) ([03aa545](https://www.github.com/googleapis/java-bigquerystorage/commit/03aa5455baf29e10b5938829f340ce865574453d))
+
+### [1.6.7](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.6...v1.6.7) (2020-12-03)
+
+
+### Dependencies
+
+* update dependency org.apache.avro:avro to v1.10.1 ([#703](https://www.github.com/googleapis/java-bigquerystorage/issues/703)) ([9b5480a](https://www.github.com/googleapis/java-bigquerystorage/commit/9b5480af03b104c8b3c76e88534e39644570296a))
+
+### [1.6.6](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.5...v1.6.6) (2020-12-02)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.0 ([#696](https://www.github.com/googleapis/java-bigquerystorage/issues/696)) ([dae7ee0](https://www.github.com/googleapis/java-bigquerystorage/commit/dae7ee00839c0f1fce728c19d8974d49a5f4805d))
+
+### [1.6.5](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.4...v1.6.5) (2020-12-02)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v1.125.0 ([#682](https://www.github.com/googleapis/java-bigquerystorage/issues/682)) ([978b304](https://www.github.com/googleapis/java-bigquerystorage/commit/978b30429bd7daa4f462f2bcc5a4feacb0a63c5a))
+
+### [1.6.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.3...v1.6.4) (2020-11-16)
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v1.124.6 
([#676](https://www.github.com/googleapis/java-bigquerystorage/issues/676)) ([3f3d665](https://www.github.com/googleapis/java-bigquerystorage/commit/3f3d6651ecd8193580091ab51fc3aaa19e879dec)) + +### [1.6.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.2...v1.6.3) (2020-11-12) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.124.5 ([#666](https://www.github.com/googleapis/java-bigquerystorage/issues/666)) ([eeccba7](https://www.github.com/googleapis/java-bigquerystorage/commit/eeccba734676aaa873e59b1ebf86726b04e48747)) + +### [1.6.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.1...v1.6.2) (2020-11-11) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.124.4 ([#653](https://www.github.com/googleapis/java-bigquerystorage/issues/653)) ([48fb650](https://www.github.com/googleapis/java-bigquerystorage/commit/48fb6502884c0df6c962a7e2be841110e1b6c9ee)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.15.0 ([#662](https://www.github.com/googleapis/java-bigquerystorage/issues/662)) ([6cf80d8](https://www.github.com/googleapis/java-bigquerystorage/commit/6cf80d87f3766359eda172324249bbe6c0461111)) + +### [1.6.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.6.0...v1.6.1) (2020-11-01) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.124.0 ([#644](https://www.github.com/googleapis/java-bigquerystorage/issues/644)) ([1f08141](https://www.github.com/googleapis/java-bigquerystorage/commit/1f08141e865322ea71c5cccfe459eb089d364ee8)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.124.1 ([#648](https://www.github.com/googleapis/java-bigquerystorage/issues/648)) ([8077e9f](https://www.github.com/googleapis/java-bigquerystorage/commit/8077e9f31385ad770585d7d611e4b42f21aa1673)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.14.1 ([#650](https://www.github.com/googleapis/java-bigquerystorage/issues/650)) ([8bc7210](https://www.github.com/googleapis/java-bigquerystorage/commit/8bc72109956c9b26d5ab53ea777d196c267b1681)) + +## [1.6.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.6...v1.6.0) (2020-10-27) + + +### Features + +* bigquery Write API V1Beta2 public interface. ([#637](https://www.github.com/googleapis/java-bigquerystorage/issues/637)) ([bb21e7b](https://www.github.com/googleapis/java-bigquerystorage/commit/bb21e7bc2b206e7720855d032889f4bc45121f9a)) + + +### Bug Fixes + +* remove stream ttl in client library, since there is no very clear TTL defined. 
([#627](https://www.github.com/googleapis/java-bigquerystorage/issues/627)) ([2ae69b6](https://www.github.com/googleapis/java-bigquerystorage/commit/2ae69b640adc48b79f0aab71c215eb3ef055a34c))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v1.123.1 ([#632](https://www.github.com/googleapis/java-bigquerystorage/issues/632)) ([7b5fc9b](https://www.github.com/googleapis/java-bigquerystorage/commit/7b5fc9b917035ca3b419b71abb0f9d6a4ca59950))
+* update dependency com.google.cloud:google-cloud-bigquery to v1.123.2 ([#633](https://www.github.com/googleapis/java-bigquerystorage/issues/633)) ([bf6260e](https://www.github.com/googleapis/java-bigquerystorage/commit/bf6260ebe01d040e42f3eb4b3b8262054f9316bf))
+
+### [1.5.6](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.5...v1.5.6) (2020-10-20)
+
+
+### Bug Fixes
+
+* Backend issues have been resolved, so we no longer need to wait for 5 seconds between reconnections ([#610](https://www.github.com/googleapis/java-bigquerystorage/issues/610)) ([f536fc7](https://www.github.com/googleapis/java-bigquerystorage/commit/f536fc7c998ac7d6c54e7d8b3bb7f9a2d6b5626b))
+* test failure due to message start appending entity ([#609](https://www.github.com/googleapis/java-bigquerystorage/issues/609)) ([63b1762](https://www.github.com/googleapis/java-bigquerystorage/commit/63b17629c62bb06f2651e914ff2988def5107ae7))
+
+
+### Dependencies
+
+* update arrow.version to v2 ([#613](https://www.github.com/googleapis/java-bigquerystorage/issues/613)) ([4b07ee2](https://www.github.com/googleapis/java-bigquerystorage/commit/4b07ee26fea5b58fbf18f342c3e9d5220361d45e))
+* update dependency com.google.cloud:google-cloud-bigquery to v1.123.0 ([#618](https://www.github.com/googleapis/java-bigquerystorage/issues/618)) ([d565550](https://www.github.com/googleapis/java-bigquerystorage/commit/d56555042f886b81eb74701f3ed87cf4fea2fc72))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.11.0 ([#601](https://www.github.com/googleapis/java-bigquerystorage/issues/601)) ([0527f44](https://www.github.com/googleapis/java-bigquerystorage/commit/0527f447826e2829801bbdfcd6db55b6e79acb32))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.12.1 ([#615](https://www.github.com/googleapis/java-bigquerystorage/issues/615)) ([dfc6436](https://www.github.com/googleapis/java-bigquerystorage/commit/dfc6436353ec2a00cd4ecb3c47503757177e33a9))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.13.0 ([#619](https://www.github.com/googleapis/java-bigquerystorage/issues/619)) ([4a24bf8](https://www.github.com/googleapis/java-bigquerystorage/commit/4a24bf881109d84318a3911a71cbff2657a61f7d))
+* update dependency com.google.truth:truth to v1.1 ([#617](https://www.github.com/googleapis/java-bigquerystorage/issues/617)) ([bcab936](https://www.github.com/googleapis/java-bigquerystorage/commit/bcab936673226f771dcfd77f07066d67b64e08dd))
+* update dependency junit:junit to v4.13.1 ([#598](https://www.github.com/googleapis/java-bigquerystorage/issues/598)) ([a732ae0](https://www.github.com/googleapis/java-bigquerystorage/commit/a732ae00dff2dde67478e0eb02308096ba6dc192))
+
+### [1.5.5](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.4...v1.5.5) (2020-10-09)
+
+
+### Bug Fixes
+
+* exception tests ([#586](https://www.github.com/googleapis/java-bigquerystorage/issues/586)) 
([4e3ad9b](https://www.github.com/googleapis/java-bigquerystorage/commit/4e3ad9bf65190b7e68902e4e62ecfd5c3b80d795)) +* remove apache commons lang ([#585](https://www.github.com/googleapis/java-bigquerystorage/issues/585)) ([1c85e8f](https://www.github.com/googleapis/java-bigquerystorage/commit/1c85e8fbf5b2b4e2073c72a86efd83812cabac37)) + + +### Dependencies + +* update dependency com.fasterxml.jackson.core:jackson-core to v2.11.3 ([#580](https://www.github.com/googleapis/java-bigquerystorage/issues/580)) ([8d800fa](https://www.github.com/googleapis/java-bigquerystorage/commit/8d800fac7c7dbf1d44cdfa350779be87970a10da)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.122.0 ([#579](https://www.github.com/googleapis/java-bigquerystorage/issues/579)) ([0bf5220](https://www.github.com/googleapis/java-bigquerystorage/commit/0bf5220009360ac7ccba6463d32b842bc9158c7b)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.122.1 ([#588](https://www.github.com/googleapis/java-bigquerystorage/issues/588)) ([496205e](https://www.github.com/googleapis/java-bigquerystorage/commit/496205e43e10b91fadbf958fcc9135210ff98ca4)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.122.2 ([#593](https://www.github.com/googleapis/java-bigquerystorage/issues/593)) ([abe21f0](https://www.github.com/googleapis/java-bigquerystorage/commit/abe21f02f811391259ebe921fff194a3a0394032)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.1 ([#589](https://www.github.com/googleapis/java-bigquerystorage/issues/589)) ([e89c92b](https://www.github.com/googleapis/java-bigquerystorage/commit/e89c92bf2b88d4020d31fba707ed69d78e9b74bc)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.2 ([#592](https://www.github.com/googleapis/java-bigquerystorage/issues/592)) ([33c460f](https://www.github.com/googleapis/java-bigquerystorage/commit/33c460fa0658307696dbdc7f381ede8df933b0cb)) + +### [1.5.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.3...v1.5.4) (2020-09-24) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.0 ([#563](https://www.github.com/googleapis/java-bigquerystorage/issues/563)) ([d78ed84](https://www.github.com/googleapis/java-bigquerystorage/commit/d78ed84d72c4bb4ded5aa3f33a3e709aa5d88629)) + +### [1.5.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.2...v1.5.3) (2020-09-22) + + +### Documentation + +* changes without context ([#538](https://www.github.com/googleapis/java-bigquerystorage/issues/538)) ([50bb618](https://www.github.com/googleapis/java-bigquerystorage/commit/50bb618ca736a7a9a1262bbf2d2ad75b70d3cd53)) + + +### Dependencies + +* update arrow-vector to v1 ([#551](https://www.github.com/googleapis/java-bigquerystorage/issues/551)) ([5cdf42c](https://www.github.com/googleapis/java-bigquerystorage/commit/5cdf42cf8cad0a6d30b7072142a7698fafafa8d9)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.120.0 ([#549](https://www.github.com/googleapis/java-bigquerystorage/issues/549)) ([e8dea03](https://www.github.com/googleapis/java-bigquerystorage/commit/e8dea0362de56698a9c3072837e75b31026dd82c)) + +### [1.5.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.1...v1.5.2) (2020-09-21) + + +### Bug Fixes + +* geo type accepted change from bytes to string ([#523](https://www.github.com/googleapis/java-bigquerystorage/issues/523)) 
([94e6394](https://www.github.com/googleapis/java-bigquerystorage/commit/94e63941e305c64d50829b520594a7e411b15e82)) +* update schema compat check with backend type changes ([#522](https://www.github.com/googleapis/java-bigquerystorage/issues/522)) ([2fcae4f](https://www.github.com/googleapis/java-bigquerystorage/commit/2fcae4fda0bb899efa84aba377073da556ec59d0)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.117.1 ([#514](https://www.github.com/googleapis/java-bigquerystorage/issues/514)) ([2689689](https://www.github.com/googleapis/java-bigquerystorage/commit/26896896fc3b3342363e191a2e7ffcca6ee291c1)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.117.2 ([#529](https://www.github.com/googleapis/java-bigquerystorage/issues/529)) ([74c1854](https://www.github.com/googleapis/java-bigquerystorage/commit/74c1854fbc5e7fea164b7945cb5770c2063032da)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.118.0 ([#530](https://www.github.com/googleapis/java-bigquerystorage/issues/530)) ([ffdbf2a](https://www.github.com/googleapis/java-bigquerystorage/commit/ffdbf2a4f763fe0cb9c47dcaafa44a497426bc11)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.119.0 ([#532](https://www.github.com/googleapis/java-bigquerystorage/issues/532)) ([f005d93](https://www.github.com/googleapis/java-bigquerystorage/commit/f005d93a14df16afff6b0ee6797d2b23da21ebcb)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.9.0 ([#517](https://www.github.com/googleapis/java-bigquerystorage/issues/517)) ([91899a9](https://www.github.com/googleapis/java-bigquerystorage/commit/91899a94617628a27437c72ce702aff91e101ecc)) + +### [1.5.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.5.0...v1.5.1) (2020-08-31) + + +### Bug Fixes + +* temporarily disable reporting to unblock releases ([#506](https://www.github.com/googleapis/java-bigquerystorage/issues/506)) ([042cb41](https://www.github.com/googleapis/java-bigquerystorage/commit/042cb414ab29cc4143a728e211f95fa5c0cfa1a0)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.117.0 ([#508](https://www.github.com/googleapis/java-bigquerystorage/issues/508)) ([15dea1b](https://www.github.com/googleapis/java-bigquerystorage/commit/15dea1b31a57bef6ebc2d7c306b06f186026c10a)) + +## [1.5.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.4.1...v1.5.0) (2020-08-19) + + +### Features + +* Add a flushAll() method that will flush all the inflight request and make sure all responses returned ([#492](https://www.github.com/googleapis/java-bigquerystorage/issues/492)) ([6134818](https://www.github.com/googleapis/java-bigquerystorage/commit/6134818f7bc5ada324a78d5c048bb2eeb83f8ca8)) +* add JsonWriterCache.java and added JsonWriterCache in DirectWriter to allow JsonWrites ([#489](https://www.github.com/googleapis/java-bigquerystorage/issues/489)) ([34193b8](https://www.github.com/googleapis/java-bigquerystorage/commit/34193b88bcf64b97dbcde2183e7587fa44923d4b)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.10 ([#483](https://www.github.com/googleapis/java-bigquerystorage/issues/483)) ([285b6f8](https://www.github.com/googleapis/java-bigquerystorage/commit/285b6f846753f387800d2787746ddb43de67a4b7)) + +### [1.4.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.4.0...v1.4.1) (2020-08-10) + + +### Dependencies + +* update dependency 
com.google.cloud:google-cloud-shared-dependencies to v0.8.6 ([#477](https://www.github.com/googleapis/java-bigquerystorage/issues/477)) ([f5faba6](https://www.github.com/googleapis/java-bigquerystorage/commit/f5faba6702ca76c102d15cacd72809cc10699eac)) + +## [1.4.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.3.1...v1.4.0) (2020-08-07) + + +### Features + +* add JsonStreamWriter ([#475](https://www.github.com/googleapis/java-bigquerystorage/issues/475)) ([341bf99](https://www.github.com/googleapis/java-bigquerystorage/commit/341bf99d47d788ebabaa1ce775d1a4f527ee46b0)) + + +### Bug Fixes + +* enum value conflict in generated ProtoSchema descriptor. ([#469](https://www.github.com/googleapis/java-bigquerystorage/issues/469)) ([3e1382f](https://www.github.com/googleapis/java-bigquerystorage/commit/3e1382f247de5e6ee8727130280e34fa01d3c088)), closes [#464](https://www.github.com/googleapis/java-bigquerystorage/issues/464) + + +### Dependencies + +* update dependency com.fasterxml.jackson.core:jackson-core to v2.11.2 ([#461](https://www.github.com/googleapis/java-bigquerystorage/issues/461)) ([36b368a](https://www.github.com/googleapis/java-bigquerystorage/commit/36b368a14bb7d45f92af23bc0dd6fe08ad79b085)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.9 ([#473](https://www.github.com/googleapis/java-bigquerystorage/issues/473)) ([b4ba055](https://www.github.com/googleapis/java-bigquerystorage/commit/b4ba055a809dbe414e8e19d377b8257d0b5446d6)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.5 ([#467](https://www.github.com/googleapis/java-bigquerystorage/issues/467)) ([35c65d4](https://www.github.com/googleapis/java-bigquerystorage/commit/35c65d466d44d01e6f7626ef7cf1aa0e9f3ff0f3)) + +### [1.3.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.3.0...v1.3.1) (2020-07-31) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.7 ([#441](https://www.github.com/googleapis/java-bigquerystorage/issues/441)) ([abc1971](https://www.github.com/googleapis/java-bigquerystorage/commit/abc197169b5f04e0eafb486db0d9038903cefe61)) + +## [1.3.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.2.1...v1.3.0) (2020-07-21) + + +### Features + +* Adding JsonToProtoMessage.java ([#400](https://www.github.com/googleapis/java-bigquerystorage/issues/400)) ([200da6d](https://www.github.com/googleapis/java-bigquerystorage/commit/200da6d66e82eaabceeae56e4ff3d1b8e112bd8d)) +* re-generated to pick up changes from googleapis. 
([#435](https://www.github.com/googleapis/java-bigquerystorage/issues/435)) ([bdf35e0](https://www.github.com/googleapis/java-bigquerystorage/commit/bdf35e08987ffdf49f7545131cd0dba56ff72bd9)), closes [#436](https://www.github.com/googleapis/java-bigquerystorage/issues/436)
+
+### [1.2.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.2.0...v1.2.1) (2020-07-20)
+
+
+### Bug Fixes
+
+* ProtoSchemaConverter's problem when converting fields that reference the same… ([#428](https://www.github.com/googleapis/java-bigquerystorage/issues/428)) ([1ce2621](https://www.github.com/googleapis/java-bigquerystorage/commit/1ce2621fe633f29c57bc4f4df84b2bcc2c57bdb8))
+
+## [1.2.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.1.1...v1.2.0) (2020-07-16)
+
+
+### Features
+
+* add 'Received Rst Stream' to automatic retry strings ([#419](https://www.github.com/googleapis/java-bigquerystorage/issues/419)) ([1584bdb](https://www.github.com/googleapis/java-bigquerystorage/commit/1584bdbf5820a378d844d333e8546b6a26b3c643))
+
+
+### Bug Fixes
+
+* BQTableSchemaToProtobufDescriptor will now only generate lower-cased field names in the protobuf descriptor ([#415](https://www.github.com/googleapis/java-bigquerystorage/issues/415)) ([4672939](https://www.github.com/googleapis/java-bigquerystorage/commit/46729398eee0f728c3de731f803580342fbe787f))
+
+### [1.1.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.1.0...v1.1.1) (2020-07-14)
+
+
+### Bug Fixes
+
+* BQTableSchemaToProtoDescriptor change type mapping ([#402](https://www.github.com/googleapis/java-bigquerystorage/issues/402)) ([413d6f0](https://www.github.com/googleapis/java-bigquerystorage/commit/413d6f03f288fa21511daaa3442fc1fde9ca246f))
+
+
+### Dependencies
+
+* update dependency com.google.cloud:google-cloud-bigquery to v1.116.6 ([#407](https://www.github.com/googleapis/java-bigquerystorage/issues/407)) ([d0aa73d](https://www.github.com/googleapis/java-bigquerystorage/commit/d0aa73d0fe6233878935ad510bb5d648764872cd))
+
+## [1.1.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v1.0.0...v1.1.0) (2020-07-10)
+
+
+### Features
+
+* add two new fields, ignoreUnknownFields in AppendRowsRequest and update_schema in AppendRowsResponse. 
([#389](https://www.github.com/googleapis/java-bigquerystorage/issues/389)) ([3e9d4c8](https://www.github.com/googleapis/java-bigquerystorage/commit/3e9d4c8abae73b25894d330a99e4213057777bc8)) +* Added BQSchemaToProtoDescriptor.java ([#395](https://www.github.com/googleapis/java-bigquerystorage/issues/395)) ([c3a9542](https://www.github.com/googleapis/java-bigquerystorage/commit/c3a9542b146c2a95bd69e6bb940e02f72354141c)) + + +### Dependencies + +* update dependency com.fasterxml.jackson.core:jackson-core to v2.11.1 ([#383](https://www.github.com/googleapis/java-bigquerystorage/issues/383)) ([004e78b](https://www.github.com/googleapis/java-bigquerystorage/commit/004e78be6e5de02ce83ef95e5a40c0f23f4b11a0)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.4 ([#404](https://www.github.com/googleapis/java-bigquerystorage/issues/404)) ([dd56e6c](https://www.github.com/googleapis/java-bigquerystorage/commit/dd56e6c2e9fca51d569a700b4d0bb9527b1e347c)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.2 ([#393](https://www.github.com/googleapis/java-bigquerystorage/issues/393)) ([4d3bce6](https://www.github.com/googleapis/java-bigquerystorage/commit/4d3bce618f39b44048ee20ed47b1cd61354117ad)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.3 ([#405](https://www.github.com/googleapis/java-bigquerystorage/issues/405)) ([01c59e2](https://www.github.com/googleapis/java-bigquerystorage/commit/01c59e26c6c5f8bc42112aa89a90da5786343efc)) +* update dependency org.apache.avro:avro to v1.10.0 ([#392](https://www.github.com/googleapis/java-bigquerystorage/issues/392)) ([25dd6d5](https://www.github.com/googleapis/java-bigquerystorage/commit/25dd6d54e129cfb4e0f87f9f05abe4314a01a0dd)) + +## [1.0.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.134.1...v1.0.0) (2020-06-23) + + +### Features + +* promote to GA ([#374](https://www.github.com/googleapis/java-bigquerystorage/issues/374)) ([9d2b891](https://www.github.com/googleapis/java-bigquerystorage/commit/9d2b8916315ae106a8b24c74f786bb2e5dea10e5)) + +### [0.134.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.134.0...v0.134.1) (2020-06-22) + + +### Bug Fixes + +* BQ Numeric is compatible with double and float protobuf types ([#367](https://www.github.com/googleapis/java-bigquerystorage/issues/367)) ([1b2f110](https://www.github.com/googleapis/java-bigquerystorage/commit/1b2f1103d83502836dbc2d6a0d5659e44cd9836d)) +* Renamed SchemaCompact.java to SchemaCompatibility.java ([#362](https://www.github.com/googleapis/java-bigquerystorage/issues/362)) ([d3f4787](https://www.github.com/googleapis/java-bigquerystorage/commit/d3f47877003d24aabe76c5ddf4c78a70a4d86a03)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.3 ([#361](https://www.github.com/googleapis/java-bigquerystorage/issues/361)) ([e88095f](https://www.github.com/googleapis/java-bigquerystorage/commit/e88095f45b48b781eca2ee572fcb841ef8a430ee)) + +## [0.134.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.133.4...v0.134.0) (2020-06-18) + + +### Features + +* Added schema compatibility check functionality (SchemaCompact.java) ([#339](https://www.github.com/googleapis/java-bigquerystorage/issues/339)) ([bc2d8cc](https://www.github.com/googleapis/java-bigquerystorage/commit/bc2d8cc82adeeddb21aeb9845e0883d369101513)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.1 
([#346](https://www.github.com/googleapis/java-bigquerystorage/issues/346)) ([9db4c47](https://www.github.com/googleapis/java-bigquerystorage/commit/9db4c475118b03b323b4dc502ec0634692e9dea3)) + +### [0.133.4](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.133.3...v0.133.4) (2020-06-11) + + +### Bug Fixes + +* make awaitTermination and shutdown protected, since we already have close() method, it is confusing to have 3 shutdown methods ([#330](https://www.github.com/googleapis/java-bigquerystorage/issues/330)) ([8856288](https://www.github.com/googleapis/java-bigquerystorage/commit/88562883051fa1d856818d4ff47fcc020e0452de)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.2 ([#334](https://www.github.com/googleapis/java-bigquerystorage/issues/334)) ([a611756](https://www.github.com/googleapis/java-bigquerystorage/commit/a611756faea308c7e6714d8eecdb6a295ccb3e6a)) + +### [0.133.3](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.133.2...v0.133.3) (2020-06-04) + + +### Bug Fixes + +* add retry logic for readrows v1beta1 ([#314](https://www.github.com/googleapis/java-bigquerystorage/issues/314)) ([5290cec](https://www.github.com/googleapis/java-bigquerystorage/commit/5290cec444eaf1a21bcea543ac002276b82957e8)) +* add retry logic for readrows v1beta2 ([#315](https://www.github.com/googleapis/java-bigquerystorage/issues/315)) ([cf1ab06](https://www.github.com/googleapis/java-bigquerystorage/commit/cf1ab06b4324219d2558bef6d30389dbf5d37ab7)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.1 ([#321](https://www.github.com/googleapis/java-bigquerystorage/issues/321)) ([c08116b](https://www.github.com/googleapis/java-bigquerystorage/commit/c08116b0fdabe256635bc8ba96b2c307f33a3998)) +* update libraries-bom version ([dd21613](https://www.github.com/googleapis/java-bigquerystorage/commit/dd216134b98816f4b3cc6bd036eab01ebf04df35)) + + +### Documentation + +* **samples:** add codeowners file ([#317](https://www.github.com/googleapis/java-bigquerystorage/issues/317)) ([fc351c7](https://www.github.com/googleapis/java-bigquerystorage/commit/fc351c702ab0674f668a396559e18898f4653fdb)) +* **samples:** add samples ([#309](https://www.github.com/googleapis/java-bigquerystorage/issues/309)) ([f142827](https://www.github.com/googleapis/java-bigquerystorage/commit/f142827253f7262f1d47d4c557c92076e1fa5fbe)) + +### [0.133.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.133.1...v0.133.2) (2020-05-28) + + +### Documentation + +* **samples:** migrate samples into client ([#308](https://www.github.com/googleapis/java-bigquerystorage/issues/308)) ([c2161fb](https://www.github.com/googleapis/java-bigquerystorage/commit/c2161fbccb6043b0fec7b25eb3de1a0f4f61e7a1)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.7.0 ([#302](https://www.github.com/googleapis/java-bigquerystorage/issues/302)) ([5d22cb0](https://www.github.com/googleapis/java-bigquerystorage/commit/5d22cb09c85c7f4f8cf4f999e23747ac636023ab)) + +### [0.133.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.133.0...v0.133.1) (2020-05-27) + + +### Bug Fixes + +* add retry logic for readrows ([#263](https://www.github.com/googleapis/java-bigquerystorage/issues/263)) ([50345b6](https://www.github.com/googleapis/java-bigquerystorage/commit/50345b653d59209b7912b56b76c6d41e289ecb30)) + + +### Dependencies + +* bump shared-deps version and add back certain test 
deps ([#300](https://www.github.com/googleapis/java-bigquerystorage/issues/300)) ([edfa26b](https://www.github.com/googleapis/java-bigquerystorage/commit/edfa26bb5bfa506004a6d3e39775f9f66b956db9)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.115.1 ([#294](https://www.github.com/googleapis/java-bigquerystorage/issues/294)) ([75a08c3](https://www.github.com/googleapis/java-bigquerystorage/commit/75a08c3683fde88264d310f965a0c973b54dfd9e)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.116.0 ([#296](https://www.github.com/googleapis/java-bigquerystorage/issues/296)) ([d243ece](https://www.github.com/googleapis/java-bigquerystorage/commit/d243ece29dd1494531f623b69e609fb833e970dc)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.4.0 ([#291](https://www.github.com/googleapis/java-bigquerystorage/issues/291)) ([20b2963](https://www.github.com/googleapis/java-bigquerystorage/commit/20b2963bbf315951d1607bee63a7aa083cdb6c86)) + +## [0.133.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.132.0...v0.133.0) (2020-05-15) + + +### Features + +* Add flush API to StreamWriter ([#278](https://www.github.com/googleapis/java-bigquerystorage/issues/278)) ([f617259](https://www.github.com/googleapis/java-bigquerystorage/commit/f61725995fdc905581dca1109f30afed54d2da8e)) + + +### Bug Fixes + +* try to make test run a bit faster, and update some logs. ([#279](https://www.github.com/googleapis/java-bigquerystorage/issues/279)) ([4749b85](https://www.github.com/googleapis/java-bigquerystorage/commit/4749b8516b8f5802091eb21fa349b1c5c58fb48a)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.115.0 ([#283](https://www.github.com/googleapis/java-bigquerystorage/issues/283)) ([873d787](https://www.github.com/googleapis/java-bigquerystorage/commit/873d78765eafed471f175d02450c4a107a801219)) + +## [0.132.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.131.2...v0.132.0) (2020-05-13) + + +### Features + +* add a Flush API to enable finer grained data commit needs for dataflow. 
([#272](https://www.github.com/googleapis/java-bigquerystorage/issues/272)) ([b1c827f](https://www.github.com/googleapis/java-bigquerystorage/commit/b1c827f8d60f747ce71e2288935439b7c16c0076)) + + +### Documentation + +* update CONTRIBUTING.md to include code formatting ([#534](https://www.github.com/googleapis/java-bigquerystorage/issues/534)) ([#273](https://www.github.com/googleapis/java-bigquerystorage/issues/273)) ([9f071d0](https://www.github.com/googleapis/java-bigquerystorage/commit/9f071d006972f40992284fff1f39162d5b521c5e)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.114.0 ([#269](https://www.github.com/googleapis/java-bigquerystorage/issues/269)) ([d71e6b7](https://www.github.com/googleapis/java-bigquerystorage/commit/d71e6b7166bc17579c33400c443ef7c5eec7ee8c)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.3.1 ([#274](https://www.github.com/googleapis/java-bigquerystorage/issues/274)) ([9c9471a](https://www.github.com/googleapis/java-bigquerystorage/commit/9c9471a141db069b40e93d76d5632c5cf8ab6257)) + +### [0.131.2](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.131.1...v0.131.2) (2020-05-08) + + +### Bug Fixes + +* Avoid setting error on response future twice ([#261](https://www.github.com/googleapis/java-bigquerystorage/issues/261)) ([35ef0ed](https://www.github.com/googleapis/java-bigquerystorage/commit/35ef0ed80d55fd4ca015e3a9b6b631d3d893aa1a)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.3.0 ([#256](https://www.github.com/googleapis/java-bigquerystorage/issues/256)) ([252440a](https://www.github.com/googleapis/java-bigquerystorage/commit/252440a84d45d9c13e468e7b59fe4702499143a9)) + +### [0.131.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.131.0...v0.131.1) (2020-05-04) + + +### Bug Fixes + +* flaky writeapi manual client tests ([#238](https://www.github.com/googleapis/java-bigquerystorage/issues/238)) ([89c8623](https://www.github.com/googleapis/java-bigquerystorage/commit/89c8623e082cacdc8e0843bffb67da4dc8b79df3)) +* more writeapi manual client test issues ([#241](https://www.github.com/googleapis/java-bigquerystorage/issues/241)) ([65c5ec9](https://www.github.com/googleapis/java-bigquerystorage/commit/65c5ec9c27e901b1633402d5fbbbbb83f956ed97)) + +## [0.131.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.130.0...v0.131.0) (2020-04-28) + + +### Features + +* integrate gapic generator change in googleapis ([#220](https://www.github.com/googleapis/java-bigquerystorage/issues/220)) ([1565dc0](https://www.github.com/googleapis/java-bigquerystorage/commit/1565dc08515d4665b812d86223765f5cf9629b16)) + + +### Bug Fixes + +* several StreamWriter issues ([#213](https://www.github.com/googleapis/java-bigquerystorage/issues/213)) ([b803863](https://www.github.com/googleapis/java-bigquerystorage/commit/b80386394f3082e9695712343f37afc4d29df76f)) + + +### Dependencies + +* update dependency com.fasterxml.jackson.core:jackson-core to v2.11.0 ([#224](https://www.github.com/googleapis/java-bigquerystorage/issues/224)) ([87d5248](https://www.github.com/googleapis/java-bigquerystorage/commit/87d52483b7bca906df16b4432a0d7bdde6c23726)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.111.1 ([#210](https://www.github.com/googleapis/java-bigquerystorage/issues/210)) ([d898a61](https://www.github.com/googleapis/java-bigquerystorage/commit/d898a61f1c48e763e37a234364ee1a590187e8ec)) +* update 
dependency com.google.cloud:google-cloud-shared-dependencies to v0.2.0 ([#207](https://www.github.com/googleapis/java-bigquerystorage/issues/207)) ([ae29920](https://www.github.com/googleapis/java-bigquerystorage/commit/ae299204422ecb0b98747c96a2e9eac2fa1fbd14)) +* update dependency com.google.truth:truth to v1 ([#199](https://www.github.com/googleapis/java-bigquerystorage/issues/199)) ([440d85b](https://www.github.com/googleapis/java-bigquerystorage/commit/440d85bff1930835e1b4d0c13eeb2ce8209d658f)) + +## [0.130.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.129.0...v0.130.0) (2020-04-20) + + +### Features + +* use shared-dependencies bom and add flatten to grpc and proto modules ([#186](https://www.github.com/googleapis/java-bigquerystorage/issues/186)) ([532a698](https://www.github.com/googleapis/java-bigquerystorage/commit/532a698193542b0a5db03d2e29a415082f2de95b)) + +## [0.129.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.128.1...v0.129.0) (2020-04-16) + + +### Features + +* Direct writer ([#165](https://www.github.com/googleapis/java-bigquerystorage/issues/165)) ([ed718c1](https://www.github.com/googleapis/java-bigquerystorage/commit/ed718c14289a3ea41f6ef7cccd8b00d7c7c0ba6c)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-bigquery to v1.110.1 ([#155](https://www.github.com/googleapis/java-bigquerystorage/issues/155)) ([a0b0cfd](https://www.github.com/googleapis/java-bigquerystorage/commit/a0b0cfd69bd0e26c0ae3748dd9fe35431bf41b2d)) +* update dependency com.google.guava:guava-bom to v29 ([#166](https://www.github.com/googleapis/java-bigquerystorage/issues/166)) ([81c87d6](https://www.github.com/googleapis/java-bigquerystorage/commit/81c87d67332033da8f998dd281954fe362f590f2)) + +### [0.128.1](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.128.0...v0.128.1) (2020-04-07) + + +### Bug Fixes + +* waitForTermination in the manual client [#140](https://www.github.com/googleapis/java-bigquerystorage/issues/140) ([#141](https://www.github.com/googleapis/java-bigquerystorage/issues/141)) ([bdb8e0f](https://www.github.com/googleapis/java-bigquerystorage/commit/bdb8e0f6b0b8ab9b1e2e92d6e41ea3298964dd3e)) + + +### Dependencies + +* update core dependencies ([#149](https://www.github.com/googleapis/java-bigquerystorage/issues/149)) ([dbe270f](https://www.github.com/googleapis/java-bigquerystorage/commit/dbe270fb3b1ff28d231e5d401ce2b140bec4f68d)) +* update dependency org.threeten:threetenbp to v1.4.3 ([#144](https://www.github.com/googleapis/java-bigquerystorage/issues/144)) ([c1f2731](https://www.github.com/googleapis/java-bigquerystorage/commit/c1f27318ebb127980ff8ced2551610095dcfdf9e)) + +## [0.128.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.127.0...v0.128.0) (2020-04-04) + + +### Features + +* add flatten plugin ([#117](https://www.github.com/googleapis/java-bigquerystorage/issues/117)) ([c01bbc7](https://www.github.com/googleapis/java-bigquerystorage/commit/c01bbc7df8d89c6b0b5b3e7f53b541c4575ed119)) +* re-generated to pick up changes in the API or client library generator. 
([#134](https://www.github.com/googleapis/java-bigquerystorage/issues/134)) ([0541775](https://www.github.com/googleapis/java-bigquerystorage/commit/054177514fc63f5c84bcca8d71b953f06b1807ca)), closes [#112](https://www.github.com/googleapis/java-bigquerystorage/issues/112) [#112](https://www.github.com/googleapis/java-bigquerystorage/issues/112) [#132](https://www.github.com/googleapis/java-bigquerystorage/issues/132) [#132](https://www.github.com/googleapis/java-bigquerystorage/issues/132) [#122](https://www.github.com/googleapis/java-bigquerystorage/issues/122) [#122](https://www.github.com/googleapis/java-bigquerystorage/issues/122) + +## [0.127.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.126.0...v0.127.0) (2020-04-03) + + +### Features + +* create manual client for Write API ([#112](https://www.github.com/googleapis/java-bigquerystorage/issues/112)) ([98851e9](https://www.github.com/googleapis/java-bigquerystorage/commit/98851e96f7c20228cf888e4a847ac98f3da2e4b7)) +* proto converter library ([#100](https://www.github.com/googleapis/java-bigquerystorage/issues/100)) ([8fbb80e](https://www.github.com/googleapis/java-bigquerystorage/commit/8fbb80eccdfafe8ffd5ff24fe04132878d09d9ae)) + + +### Dependencies + +* update core dependencies ([#98](https://www.github.com/googleapis/java-bigquerystorage/issues/98)) ([0983575](https://www.github.com/googleapis/java-bigquerystorage/commit/09835752000a99f1cef113cc0808b0c7c907c190)) +* update core dependencies to v1.55.0 ([#127](https://www.github.com/googleapis/java-bigquerystorage/issues/127)) ([99cedf9](https://www.github.com/googleapis/java-bigquerystorage/commit/99cedf926a7b51f58dbec6539ecba350a5e5b845)) +* update dependency com.google.api:api-common to v1.9.0 ([#126](https://www.github.com/googleapis/java-bigquerystorage/issues/126)) ([27db090](https://www.github.com/googleapis/java-bigquerystorage/commit/27db090caf065c68f724f0457df47b83a186e91c)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.109.0 ([#114](https://www.github.com/googleapis/java-bigquerystorage/issues/114)) ([f5b48d8](https://www.github.com/googleapis/java-bigquerystorage/commit/f5b48d85f4561e31b01b614d4395c4c3b1d65ed4)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.110.0 ([#115](https://www.github.com/googleapis/java-bigquerystorage/issues/115)) ([9b20371](https://www.github.com/googleapis/java-bigquerystorage/commit/9b20371be163ed69cdb2260023afb9512102d4d5)) +* update dependency com.google.cloud:google-cloud-core to v1.93.3 ([#104](https://www.github.com/googleapis/java-bigquerystorage/issues/104)) ([519bd1e](https://www.github.com/googleapis/java-bigquerystorage/commit/519bd1ed7cfa7ba4bd4f370cf05b50e85256ac2a)) +* update dependency com.google.cloud.samples:shared-configuration to v1.0.13 ([#123](https://www.github.com/googleapis/java-bigquerystorage/issues/123)) ([819dc70](https://www.github.com/googleapis/java-bigquerystorage/commit/819dc70ac8cc6688cea5276a2ec2af323b26d55d)) +* update dependency io.grpc:grpc-bom to v1.28.1 ([#132](https://www.github.com/googleapis/java-bigquerystorage/issues/132)) ([711c5c7](https://www.github.com/googleapis/java-bigquerystorage/commit/711c5c793e992098f96f7683f4e9643ee7424d81)) +* update dependency org.threeten:threetenbp to v1.4.2 ([#113](https://www.github.com/googleapis/java-bigquerystorage/issues/113)) ([09451c1](https://www.github.com/googleapis/java-bigquerystorage/commit/09451c11fbdc9ab1bb32f41a90970cab3c9589ab)) + + +### Documentation + +* **regen:** fix closing backtick 
in docs ([#109](https://www.github.com/googleapis/java-bigquerystorage/issues/109)) ([98f3cb2](https://www.github.com/googleapis/java-bigquerystorage/commit/98f3cb2065e8fdb3de263fa8288278e37d6160b3)) + +## [0.126.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.125.0...v0.126.0) (2020-03-09) + + +### Features + +* re-generated to pick up changes in the API or client library generator. ([#85](https://www.github.com/googleapis/java-bigquerystorage/issues/85)) ([e3f4087](https://www.github.com/googleapis/java-bigquerystorage/commit/e3f40879dfda7c2dac1cf16b23605912174e2601)) +* re-generated to pick up changes in the API or client library generator. ([#95](https://www.github.com/googleapis/java-bigquerystorage/issues/95)) ([7e760a4](https://www.github.com/googleapis/java-bigquerystorage/commit/7e760a4d2782ec1674b0d3da72ba2eeed4d101a1)) +* re-generated to pick up changes in the API or client library generator. ([#97](https://www.github.com/googleapis/java-bigquerystorage/issues/97)) ([c1f1854](https://www.github.com/googleapis/java-bigquerystorage/commit/c1f1854bbe4362449b0afd427e88ab9414cd8fac)) + + +### Dependencies + +* update core dependencies ([#89](https://www.github.com/googleapis/java-bigquerystorage/issues/89)) ([bdf1385](https://www.github.com/googleapis/java-bigquerystorage/commit/bdf13853a8abd791c4a376284a5afeed1d2afbd8)) +* update dependency com.fasterxml.jackson.core:jackson-core to v2.10.3 ([#92](https://www.github.com/googleapis/java-bigquerystorage/issues/92)) ([551d024](https://www.github.com/googleapis/java-bigquerystorage/commit/551d02482c8694d32e465acccbf8de6ae515d3c8)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.107.0 ([#82](https://www.github.com/googleapis/java-bigquerystorage/issues/82)) ([96e55ee](https://www.github.com/googleapis/java-bigquerystorage/commit/96e55ee322a9fcb0b05f3a942eefc48e5f1233d0)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.108.0 ([#91](https://www.github.com/googleapis/java-bigquerystorage/issues/91)) ([aa7b8b8](https://www.github.com/googleapis/java-bigquerystorage/commit/aa7b8b84236261b1c88367851cbffe3126d81a50)) +* update dependency com.google.cloud:google-cloud-bigquery to v1.108.1 ([#96](https://www.github.com/googleapis/java-bigquerystorage/issues/96)) ([11b4418](https://www.github.com/googleapis/java-bigquerystorage/commit/11b44186007dd7eb15f3daf5d559c705003e8709)) +* update dependency com.google.cloud:google-cloud-core to v1.93.1 ([#93](https://www.github.com/googleapis/java-bigquerystorage/issues/93)) ([aa10c59](https://www.github.com/googleapis/java-bigquerystorage/commit/aa10c59ea155eec8de8433dbb8ef924327bf60a2)) +* update dependency io.grpc:grpc-bom to v1.27.2 ([#88](https://www.github.com/googleapis/java-bigquerystorage/issues/88)) ([cdba693](https://www.github.com/googleapis/java-bigquerystorage/commit/cdba693add40b7571a43b4b0c5ca8a772e0333c5)) + +## [0.125.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.124.0...v0.125.0) (2020-02-18) + + +### Features + +* add ArrowSerializationOptions to TableReadOptions ([#76](https://www.github.com/googleapis/java-bigquerystorage/issues/76)) ([df5d4cb](https://www.github.com/googleapis/java-bigquerystorage/commit/df5d4cba8f599ad60abc1266f75f63ffaa1d03e1)) +* re-generated to pick up changes in the API or client library generator. 
([#70](https://www.github.com/googleapis/java-bigquerystorage/issues/70)) ([3631965](https://www.github.com/googleapis/java-bigquerystorage/commit/363196550f5d5a7381ec88e3404d334475fe430e)) +* re-generated to pick up changes in the API or client library generator. ([#74](https://www.github.com/googleapis/java-bigquerystorage/issues/74)) ([a41f1a7](https://www.github.com/googleapis/java-bigquerystorage/commit/a41f1a7bcffc1d2076ecbf4647eb2e128ba595be)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-core to v1.92.5 ([#73](https://www.github.com/googleapis/java-bigquerystorage/issues/73)) ([a822658](https://www.github.com/googleapis/java-bigquerystorage/commit/a822658649c1c306933ea97d466f254b391622eb)) +* update dependency com.google.protobuf:protobuf-java to v3.11.4 ([#79](https://www.github.com/googleapis/java-bigquerystorage/issues/79)) ([2c1c640](https://www.github.com/googleapis/java-bigquerystorage/commit/2c1c64074d0012b9b3c9d729278f643e5fde1658)) +* update dependency io.grpc:grpc-bom to v1.27.1 ([#78](https://www.github.com/googleapis/java-bigquerystorage/issues/78)) ([c4814dc](https://www.github.com/googleapis/java-bigquerystorage/commit/c4814dc0900e95828ae296ff44e4fa4b40daa6cd)) +* update dependency org.apache.avro:avro to v1.9.2 ([#77](https://www.github.com/googleapis/java-bigquerystorage/issues/77)) ([8f8fa6a](https://www.github.com/googleapis/java-bigquerystorage/commit/8f8fa6ae7163a295c3586c575751a6481341dc6b)) + +## [0.124.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.123.0...v0.124.0) (2020-02-07) + + +### Features + +* add an enhanced layer for BigQuery Storage v1 client ([#66](https://www.github.com/googleapis/java-bigquerystorage/issues/66)) ([43fc284](https://www.github.com/googleapis/java-bigquerystorage/commit/43fc284e00ddbc9a018d734e3f6f09c82ebd92d4)) +* add v1 integration tests ([#68](https://www.github.com/googleapis/java-bigquerystorage/issues/68)) ([8e7ac15](https://www.github.com/googleapis/java-bigquerystorage/commit/8e7ac1511b9f9eaea417e6761848e4735039a831)) + +## [0.123.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.122.0...v0.123.0) (2020-02-06) + + +### Features + +* v1 client generation ([#64](https://www.github.com/googleapis/java-bigquerystorage/issues/64)) ([902156d](https://www.github.com/googleapis/java-bigquerystorage/commit/902156d576845499e3eeedeff44c47d67e228098)) + +## [0.122.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.121.0...v0.122.0) (2020-02-05) + + +### Features + +* re-generated to pick up changes in the API or client library generator. ([#49](https://www.github.com/googleapis/java-bigquerystorage/issues/49)) ([00d8ccb](https://www.github.com/googleapis/java-bigquerystorage/commit/00d8ccbfd26effcb2e5e3be3cd242202a65e43b8)) +* re-generated to pick up changes in the API or client library generator. 
([#59](https://www.github.com/googleapis/java-bigquerystorage/issues/59)) ([f63b305](https://www.github.com/googleapis/java-bigquerystorage/commit/f63b3051fbd8defb4f7be7c00fd504f137a67897)) + + +### Bug Fixes + +* track v1alpha2 versions ([#58](https://www.github.com/googleapis/java-bigquerystorage/issues/58)) ([4271524](https://www.github.com/googleapis/java-bigquerystorage/commit/4271524956faea6ccc888d750afba4160e1fd453)) + + +### Documentation + +* update libraries-bom ([#54](https://www.github.com/googleapis/java-bigquerystorage/issues/54)) ([abf05eb](https://www.github.com/googleapis/java-bigquerystorage/commit/abf05ebe09ac8f71f3b59e5f8473fe3d56f4242e)) + +## [0.121.0](https://www.github.com/googleapis/java-bigquerystorage/compare/v0.120.1...v0.121.0) (2020-02-03) + + +### Features + +* add an enhanced layer for BigQuery Storage v1beta2 client ([#48](https://www.github.com/googleapis/java-bigquerystorage/issues/48)) ([9496158](https://www.github.com/googleapis/java-bigquerystorage/commit/949615823c5bb12ef749639d7337f6341973ddbf)) +* add integration tests for v1beta2 BigQuery Storage API ([#50](https://www.github.com/googleapis/java-bigquerystorage/issues/50)) ([bd37cf3](https://www.github.com/googleapis/java-bigquerystorage/commit/bd37cf385fe666702224d20aed1ad087d7346d57)) +* add v1beta2, v1alpha2 clients ([#44](https://www.github.com/googleapis/java-bigquerystorage/issues/44)) ([8c124a2](https://www.github.com/googleapis/java-bigquerystorage/commit/8c124a2fb4d73808b8e0f9267d5422658807a9d2)) + + +### Dependencies + +* bump dependency versions ([#53](https://www.github.com/googleapis/java-bigquerystorage/issues/53)) ([b6418b4](https://www.github.com/googleapis/java-bigquerystorage/commit/b6418b45771fb8861a0743b0bf5bb55a5a5b4e78)) +* update core dependencies ([#17](https://www.github.com/googleapis/java-bigquerystorage/issues/17)) ([c17786e](https://www.github.com/googleapis/java-bigquerystorage/commit/c17786ef94ffb05818d2d7ebb7958bf661f93896)) +* update dependency com.google.guava:guava-bom to v28.2-android ([#20](https://www.github.com/googleapis/java-bigquerystorage/issues/20)) ([24bf682](https://www.github.com/googleapis/java-bigquerystorage/commit/24bf682c2fcacc8245800ed55881d4c88e1d748e)) +* update dependency org.threeten:threetenbp to v1.4.1 ([617db50](https://www.github.com/googleapis/java-bigquerystorage/commit/617db50f70095fa574e2fb5316dfa7b5e609bd5e)) + +## [0.120.0](https://www.github.com/googleapis/java-bigquerystorage/compare/0.120.0-beta...v0.120.0) (2020-01-07) + + +### Dependencies + +* update dependency junit:junit to v4.13 ([#21](https://www.github.com/googleapis/java-bigquerystorage/issues/21)) ([190ad2a](https://www.github.com/googleapis/java-bigquerystorage/commit/190ad2ab6996603a8b022ddc54dbb3195907e564)) + + +### Documentation + +* **regen:** javadoc proto class changes from protoc update ([#9](https://www.github.com/googleapis/java-bigquerystorage/issues/9)) ([d371b4a](https://www.github.com/googleapis/java-bigquerystorage/commit/d371b4a5b4d1cb343cb00d645e263fe62b5ecbd2)) diff --git a/java-bigquerystorage/README.md b/java-bigquerystorage/README.md new file mode 100644 index 000000000000..ec75b0ca59d5 --- /dev/null +++ b/java-bigquerystorage/README.md @@ -0,0 +1,266 @@ +# Google BigQuery Storage Client for Java + +Java idiomatic client for [BigQuery Storage][product-docs]. 
+
+[![Maven][maven-version-image]][maven-version-link]
+![Stability][stability-image]
+
+- [Product Documentation][product-docs]
+- [Client Library Documentation][javadocs]
+
+
+## Quickstart
+
+If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file:
+
+```xml
+<dependencyManagement>
+  <dependencies>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>libraries-bom</artifactId>
+      <version>26.70.0</version>
+      <type>pom</type>
+      <scope>import</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.opentelemetry</groupId>
+      <artifactId>opentelemetry-bom</artifactId>
+      <version>1.52.0</version>
+      <type>pom</type>
+      <scope>import</scope>
+    </dependency>
+  </dependencies>
+</dependencyManagement>
+
+<dependencies>
+  <dependency>
+    <groupId>com.google.cloud</groupId>
+    <artifactId>google-cloud-bigquerystorage</artifactId>
+  </dependency>
+</dependencies>
+```
+
+If you are using Maven without the BOM, add this to your dependencies:
+
+```xml
+<dependency>
+  <groupId>com.google.cloud</groupId>
+  <artifactId>google-cloud-bigquerystorage</artifactId>
+  <version>3.17.2</version>
+</dependency>
+```
+
+If you are using Gradle 5.x or later, add this to your dependencies:
+
+```Groovy
+implementation platform('com.google.cloud:libraries-bom:26.73.0')
+
+implementation 'com.google.cloud:google-cloud-bigquerystorage'
+```
+
+If you are using Gradle without the BOM, add this to your dependencies:
+
+```Groovy
+implementation 'com.google.cloud:google-cloud-bigquerystorage:3.19.1'
+```
+
+If you are using SBT, add this to your dependencies:
+
+```Scala
+libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "3.19.1"
+```
+
+## Authentication
+
+See the [Authentication][authentication] section in the base directory's README.
+
+## Authorization
+
+The client application making API calls must be granted [authorization scopes][auth-scopes] required for the desired BigQuery Storage APIs, and the authenticated principal must have the [IAM role(s)][predefined-iam-roles] required to access GCP resources using the BigQuery Storage API calls.
+
+## Getting Started
+
+### Prerequisites
+
+You will need a [Google Cloud Platform Console][developer-console] project with the BigQuery Storage [API enabled][enable-api].
+You will need to [enable billing][enable-billing] to use Google BigQuery Storage.
+[Follow these instructions][create-project] to get your project set up. You will also need to set up the local development environment by
+[installing the Google Cloud Command Line Interface][cloud-cli] and running the following commands on the command line:
+`gcloud auth login` and `gcloud config set project [YOUR PROJECT ID]`.
+
+### Installation and setup
+
+You'll need to obtain the `google-cloud-bigquerystorage` library. See the [Quickstart](#quickstart) section
+to add `google-cloud-bigquerystorage` as a dependency in your code.
+
+## About BigQuery Storage
+
+[BigQuery Storage][product-docs] is an API for reading data stored in BigQuery. This API provides direct, high-throughput read access to existing BigQuery tables, supports parallel access with automatic liquid sharding, and allows fine-grained control over what data is returned.
+
+See the [BigQuery Storage client library docs][javadocs] to learn how to
+use this BigQuery Storage Client Library.
+
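+To make the read path concrete, here is a minimal, untested sketch of the v1 read API: it creates a read session against a table and streams rows back in Avro form. The project, dataset, and table names are placeholders, and the snippet assumes Application Default Credentials; see the [samples](#samples) below for complete, tested programs.
+
+```java
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+
+public class ReadSketch {
+  public static void main(String[] args) throws Exception {
+    // Placeholder identifiers; substitute your own project, dataset, and table.
+    String parent = "projects/my-project";
+    String srcTable = "projects/my-project/datasets/my_dataset/tables/my_table";
+
+    try (BigQueryReadClient client = BigQueryReadClient.create()) {
+      // A read session pins the table state and can be split into parallel
+      // streams; maxStreamCount = 1 keeps this example single-threaded.
+      ReadSession session =
+          client.createReadSession(
+              CreateReadSessionRequest.newBuilder()
+                  .setParent(parent)
+                  .setReadSession(
+                      ReadSession.newBuilder().setTable(srcTable).setDataFormat(DataFormat.AVRO))
+                  .setMaxStreamCount(1)
+                  .build());
+
+      // Read all rows from the single stream as Avro-serialized blocks.
+      ReadRowsRequest request =
+          ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+      ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(request);
+      for (ReadRowsResponse response : stream) {
+        System.out.println("Received a block of " + response.getRowCount() + " rows");
+      }
+    }
+  }
+}
+```
+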
+## OpenTelemetry support
+
+The client supports emitting metrics to OpenTelemetry. This is disabled by default. It can be enabled by calling
+
+```java
+JsonStreamWriter.Builder.setEnableOpenTelemetry(true)
+```
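+
+For context, the sketch below shows where this call sits when constructing a writer: it builds a `JsonStreamWriter` against a table's default stream with OpenTelemetry metrics enabled. The table name is a placeholder and error handling is omitted; treat it as an illustration rather than a complete program.
+
+```java
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
+import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
+import com.google.cloud.bigquery.storage.v1.TableName;
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+public class OpenTelemetryWriterSketch {
+  public static void main(String[] args) throws Exception {
+    // Placeholder table name; the writer fetches the table schema from the backend.
+    String parent = TableName.of("my-project", "my_dataset", "my_table").toString();
+    try (BigQueryWriteClient client = BigQueryWriteClient.create();
+        JsonStreamWriter writer =
+            JsonStreamWriter.newBuilder(parent, client)
+                .setEnableOpenTelemetry(true) // the only OpenTelemetry-specific step
+                .build()) {
+      // Append one JSON row to the default stream and wait for the ack.
+      JSONArray rows = new JSONArray().put(new JSONObject().put("my_column", "my_value"));
+      AppendRowsResponse response = writer.append(rows).get();
+    }
+  }
+}
+```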
+
+The following metric attributes are supported.
+
+| Key             | Value |
+|-----------------|-------|
+| `error_code`    | Specifies the error code in the event an append request fails, or a connection ends. |
+| `is_retry`      | Indicates this was a retry operation. This can be set for either ack’ed requests or connection retry attempts. |
+| `table_id`      | Holds the fully qualified name of the destination table. |
+| `trace_field_1` | If a colon-separated traceId is provided, this holds the first portion. Must be non-empty. Currently populated only for Dataflow. |
+| `trace_field_2` | If a colon-separated traceId is provided, this holds the second portion. Must be non-empty. Currently populated only for Dataflow. |
+| `trace_field_3` | If a colon-separated traceId is provided, this holds the third portion. Must be non-empty. Currently populated only for Dataflow. |
+| `writer_id`     | Specifies the writer instance id. |
+
+The following metrics are supported.
+
+| Name | Kind | Description |
+|------|------|-------------|
+| `active_connection_count`    | Asynchronous gauge  | Reports the number of active connections. |
+| `append_requests_acked`      | Synchronous counter | Counts the number of requests acked by the server. |
+| `append_request_bytes_acked` | Synchronous counter | Counts the byte size of requests acked by the server. |
+| `append_rows_acked`          | Synchronous counter | Counts the number of rows in requests acked by the server. |
+| `connection_end_count`       | Synchronous counter | Counts the number of connection end events. This is decorated with the error code. |
+| `connection_start_count`     | Synchronous counter | Counts the number of connection attempts made, regardless of whether these are initial attempts or retries. |
+| `inflight_queue_length`      | Asynchronous gauge  | Reports the length of the inflight queue. This queue contains sent append requests waiting for a response from the server. |
+| `network_response_latency`   | Histogram           | Reports the time taken in milliseconds for a response to arrive once a message has been sent over the network. |
+
+### Exporting OpenTelemetry metrics
+
+An exporter or collector must be installed by the application in order for [OpenTelemetry metrics to be captured](https://opentelemetry.io/docs/concepts/components/#exporters).
+The [sample application](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/test/java/com/example/bigquerystorage/ExportOpenTelemetryIT.java) uses [Google Monitoring Metrics Exporter](https://github.com/GoogleCloudPlatform/opentelemetry-operations-java/tree/main/exporters/metrics) to export metrics to a Google Cloud project.
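+
+As a sketch of one possible wiring (not the authoritative setup; see the linked sample for that), the snippet below registers a global OpenTelemetry SDK backed by the Google Cloud metric exporter, on the assumption that the client picks up the globally registered meter provider:
+
+```java
+import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
+import java.time.Duration;
+
+public class MetricsExportSketch {
+  static void registerGlobalMetricsExporter() throws Exception {
+    // Exports collected metrics to Cloud Monitoring every 30 seconds.
+    MetricExporter exporter = GoogleCloudMetricExporter.createWithDefaultConfiguration();
+    SdkMeterProvider meterProvider =
+        SdkMeterProvider.builder()
+            .registerMetricReader(
+                PeriodicMetricReader.builder(exporter).setInterval(Duration.ofSeconds(30)).build())
+            .build();
+    // Register globally so that instrumented libraries can discover it.
+    OpenTelemetrySdk.builder().setMeterProvider(meterProvider).buildAndRegisterGlobal();
+  }
+}
+```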
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteBufferedStream.java) | +| Write Committed Stream | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteCommittedStream.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteCommittedStream.java) | +| Write Nested Proto | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java) | +| Write Pending Stream | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java) | +| Write To Default Stream | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java) | +| Write To Default Stream Timestamp Json | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java) | +| Write To Default Stream Timestamp With Arrow | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java) | +| Write To Default Stream With Arrow | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java) | + + + +## Troubleshooting + +To get help, follow the instructions in the [shared 
Troubleshooting document][troubleshooting].
+
+## Transport
+
+BigQuery Storage uses gRPC for the transport layer.
+
+## Supported Java Versions
+
+Java 8 or above is required for using this client.
+
+Google's Java client libraries,
+[Google Cloud Client Libraries][cloudlibs]
+and
+[Google Cloud API Libraries][apilibs],
+follow the
+[Oracle Java SE support roadmap][oracle]
+(see the Oracle Java SE Product Releases section).
+
+### For new development
+
+In general, new feature development occurs with support for the lowest Java
+LTS version covered by Oracle's Premier Support (which typically lasts 5 years
+from initial General Availability). If the minimum required JVM for a given
+library is changed, it is accompanied by a [semver][semver] major release.
+
+Java 11 and (in September 2021) Java 17 are the best choices for new
+development.
+
+### Keeping production systems current
+
+Google tests its client libraries with all current LTS versions covered by
+Oracle's Extended Support (which typically lasts 8 years from initial
+General Availability).
+
+#### Legacy support
+
+On a best-efforts basis, Google's client libraries support legacy versions of
+Java runtimes with long-term stable libraries that don't receive feature
+updates, as it may not be possible to backport all patches.
+
+Google provides updates on a best-efforts basis to apps that continue to use
+Java 7, though such apps might need to upgrade to current versions of the
+library that support their JVM.
+
+#### Where to find specific information
+
+The latest versions and the supported Java versions are identified on
+the individual GitHub repository `github.com/GoogleAPIs/java-SERVICENAME`
+and on [google-cloud-java][g-c-j].
+
+## Versioning
+
+This library follows [Semantic Versioning](http://semver.org/).
+
+## Contributing
+
+Contributions to this library are always welcome and highly encouraged.
+
+See [CONTRIBUTING][contributing] for more information on how to get started.
+
+Please note that this project is released with a Contributor Code of Conduct. By participating in
+this project you agree to abide by its terms. See [Code of Conduct][code-of-conduct] for more
+information.
+
+## License
+
+Apache 2.0 - See [LICENSE][license] for more information.
+
+Java is a registered trademark of Oracle and/or its affiliates.
+ +[product-docs]: https://cloud.google.com/bigquery/docs/reference/storage/ +[javadocs]: https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/history +[stability-image]: https://img.shields.io/badge/stability-stable-green +[maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigquerystorage.svg +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/3.19.1 +[authentication]: https://github.com/googleapis/google-cloud-java#authentication +[auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes +[predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles +[iam-policy]: https://cloud.google.com/iam/docs/overview#cloud-iam-policy +[developer-console]: https://console.developers.google.com/ +[create-project]: https://cloud.google.com/resource-manager/docs/creating-managing-projects +[cloud-cli]: https://cloud.google.com/cli +[troubleshooting]: https://github.com/googleapis/google-cloud-java/blob/main/TROUBLESHOOTING.md +[contributing]: https://github.com/googleapis/google-cloud-java/blob/main/CONTRIBUTING.md +[code-of-conduct]: https://github.com/googleapis/google-cloud-java/blob/main/CODE_OF_CONDUCT.md#contributor-code-of-conduct +[license]: https://github.com/googleapis/google-cloud-java/blob/main/LICENSE +[enable-billing]: https://cloud.google.com/apis/docs/getting-started#enabling_billing +[enable-api]: https://console.cloud.google.com/flows/enableapi?apiid=bigquerystorage.googleapis.com +[libraries-bom]: https://github.com/GoogleCloudPlatform/cloud-opensource-java/wiki/The-Google-Cloud-Platform-Libraries-BOM +[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png + +[semver]: https://semver.org/ +[cloudlibs]: https://cloud.google.com/apis/docs/client-libraries-explained +[apilibs]: https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries +[oracle]: https://www.oracle.com/java/technologies/java-se-support-roadmap.html +[g-c-j]: http://github.com/googleapis/google-cloud-java diff --git a/java-bigquerystorage/google-cloud-bigquerystorage-bom/pom.xml b/java-bigquerystorage/google-cloud-bigquerystorage-bom/pom.xml new file mode 100644 index 000000000000..2244f1174c14 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage-bom/pom.xml @@ -0,0 +1,128 @@ + + + 4.0.0 + com.google.cloud + google-cloud-bigquerystorage-bom + 3.19.1 + pom + + com.google.cloud + sdk-platform-java-config + 3.55.1 + + + Google Cloud bigquerystorage BOM + https://github.com/googleapis/java-bigquerystorage + + BOM for BigQuery Storage + + + + Google LLC + + + + + chingor13 + Jeff Ching + chingor@google.com + Google LLC + + Developer + + + + + + scm:git:https://github.com/googleapis/java-bigquerystorage.git + scm:git:git@github.com:googleapis/java-bigquerystorage.git + https://github.com/googleapis/java-bigquerystorage + + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + + + + com.google.cloud + google-cloud-bigquerystorage + 3.19.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta1 + 0.191.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + 0.191.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1 + 3.19.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha + 3.19.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta + 3.19.1 + + + 
com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta1 + 0.191.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + 0.191.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1 + 3.19.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha + 3.19.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta + 3.19.1 + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + org.codehaus.mojo + exec-maven-plugin + + true + + + + + diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/EnableAutoValue.txt b/java-bigquerystorage/google-cloud-bigquerystorage/EnableAutoValue.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/clirr-ignored-differences.xml b/java-bigquerystorage/google-cloud-bigquerystorage/clirr-ignored-differences.xml new file mode 100644 index 000000000000..cc4806cc7b8b --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/clirr-ignored-differences.xml @@ -0,0 +1,232 @@ + + + + + 7004 + com/google/cloud/bigquery/storage/v1/Exceptions$SchemaMismatchedException + Exceptions$SchemaMismatchedException(io.grpc.Status, io.grpc.Metadata, java.lang.String) + + + 7004 + com/google/cloud/bigquery/storage/v1/Exceptions$StreamFinalizedException + Exceptions$StreamFinalizedException(io.grpc.Status, io.grpc.Metadata, java.lang.String) + + + 8001 + com/google/cloud/bigquery/storage/v1/Exceptions$WriterClosedException + + + 5001 + com/google/cloud/bigquery/storage/v1/Exceptions$StreamWriterClosedException + com/google/cloud/bigquery/storage/v1/Exceptions$StorageException + + + 7004 + com/google/cloud/bigquery/storage/v1/Exceptions$StreamWriterClosedException + Exceptions$StreamWriterClosedException(io.grpc.Status, java.lang.String) + + + 7004 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + com.google.api.core.ApiFuture append(com.google.cloud.bigquery.storage.v1.ProtoRows, long) + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + com.google.api.core.ApiFuture append(com.google.cloud.bigquery.storage.v1.ProtoRows, long) + + + 7002 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + com.google.api.core.ApiFuture append(com.google.cloud.bigquery.storage.v1.ProtoRows) + + + 7002 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool$Settings$Builder + com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool$Settings$Builder setMaxConnectionsPerPool(int) + + + 7013 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool$Settings$Builder + com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool$Settings$Builder setMaxConnectionsPerRegion(int) + + + 7002 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool$Settings$Builder + com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool$Settings$Builder setMinConnectionsPerPool(int) + + + 7013 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool$Settings$Builder + com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool$Settings$Builder setMinConnectionsPerRegion(int) + + + 7004 + com/google/cloud/bigquery/storage/v1/Exceptions$AppendSerializtionError + Exceptions$AppendSerializtionError(java.lang.String, java.util.Map) + + + 7006 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema() + com.google.cloud.bigquery.storage.v1.ConnectionWorker$TableSchemaAndTimestamp + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + 
com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema() + + + 7004 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + ConnectionWorker(java.lang.String, com.google.cloud.bigquery.storage.v1.ProtoSchema, long, long, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteClient, boolean) + + + 7004 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + ConnectionWorkerPool(long, long, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteClient, boolean) + + + 7005 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + ConnectionWorker(java.lang.String, com.google.cloud.bigquery.storage.v1.ProtoSchema, long, long, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteClient, boolean) + ConnectionWorker(java.lang.String, com.google.cloud.bigquery.storage.v1.ProtoSchema, long, long, java.time.Duration, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings) + + + 7005 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + ConnectionWorkerPool(long, long, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteClient, boolean) + ConnectionWorkerPool(long, long, java.time.Duration, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings) + + + 7004 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + ConnectionWorker(java.lang.String, com.google.cloud.bigquery.storage.v1.ProtoSchema, long, long, java.time.Duration, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteClient, boolean) + + + 7004 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + ConnectionWorkerPool(long, long, java.time.Duration, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteClient, boolean) + + + 1001 + com/google/cloud/bigquery/storage/v1/ConnectionWorker + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + ConnectionWorkerPool(long, long, java.time.Duration, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings) + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + com.google.api.core.ApiFuture append(com.google.cloud.bigquery.storage.v1.StreamWriter, com.google.cloud.bigquery.storage.v1.ProtoRows) + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + com.google.api.core.ApiFuture append(com.google.cloud.bigquery.storage.v1.StreamWriter, com.google.cloud.bigquery.storage.v1.ProtoRows, long) + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + void close(com.google.cloud.bigquery.storage.v1.StreamWriter) + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + void enableTestingLogic() + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + long getInflightWaitSeconds(com.google.cloud.bigquery.storage.v1.StreamWriter) + + + 7009 + com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool + ConnectionWorkerPool(long, long, 
java.time.Duration, com.google.api.gax.batching.FlowController$LimitExceededBehavior, java.lang.String, com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings) + + + 7002 + com/google/cloud/bigquery/storage/v1/StreamWriter + boolean isDone() + + + 7002 + com/google/cloud/bigquery/storage/v1/JsonStreamWriter + boolean isDone() + + + 7006 + com/google/cloud/bigquery/storage/v1/ToProtoConverter + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Object, boolean) + java.util.List + + + 7005 + com/google/cloud/bigquery/storage/v1/ToProtoConverter + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Object, boolean) + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Iterable, boolean) + + + 1001 + com/google/cloud/bigquery/storage/v1/StreamConnection + + + 7002 + com/google/cloud/bigquery/storage/v1/StreamWriter$Builder + com.google.cloud.bigquery.storage.v1.StreamWriter$Builder setMaxRetryNumAttempts(int) + + + 7002 + com/google/cloud/bigquery/storage/v1/StreamWriter$Builder + com.google.cloud.bigquery.storage.v1.StreamWriter$Builder setRetryMultiplier(double) + + + 7002 + com/google/cloud/bigquery/storage/v1/StreamWriter$Builder + com.google.cloud.bigquery.storage.v1.StreamWriter$Builder setRetryFirstDelay(org.threeten.bp.Duration) + + + 7002 + com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter$Builder + com.google.cloud.bigquery.storage.v1.SchemaAwareStreamWriter$Builder setTraceIdBase(java.lang.String) + + + 1001 + com/google/cloud/bigquery/storage/v1/StreamWriter$SingleConnectionOrConnectionPool + + + 7002 + com/google/cloud/bigquery/storage/v1/JsonStreamWriter + void setMissingValueInterpretationMap(java.util.Map) + + + 7002 + com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter + void setMissingValueInterpretationMap(java.util.Map) + + + 7002 + com/google/cloud/bigquery/storage/v1/StreamWriter + void setMissingValueInterpretationMap(java.util.Map) + + + + 6004 + com/google/cloud/bigquery/storage/*/stub/readrows/ApiResultRetryAlgorithm + DEADLINE_SLEEP_DURATION + org.threeten.bp.Duration + java.time.Duration + + + + 6004 + com/google/cloud/bigquery/storage/util/Errors$IsRetryableStatusResult + retryDelay + org.threeten.bp.Duration + java.time.Duration + + + diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/pom.xml b/java-bigquerystorage/google-cloud-bigquerystorage/pom.xml new file mode 100644 index 000000000000..089b48d3e85f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/pom.xml @@ -0,0 +1,445 @@ + + + 4.0.0 + com.google.cloud + google-cloud-bigquerystorage + 3.19.1 + jar + BigQuery Storage + https://github.com/googleapis/java-bigquerystorage + BigQuery Storage + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + google-cloud-bigquerystorage + + + + + org.apache.arrow + arrow-vector + 17.0.0 + + + org.apache.arrow + arrow-memory-core + 17.0.0 + + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.2 + + + org.apache.maven.surefire + surefire-junit-platform + ${surefire.version} + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + 
com.google.protobuf:protoc:${project.protobuf-java.version}:exe:${os.detected.classifier} + + + + + test-compile + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.api + api-common + + + com.google.auto.value + auto-value-annotations + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1 + + + com.google.code.gson + gson + + + com.google.guava + guava + + + com.google.api + gax + + + com.google.api + gax-grpc + + + io.grpc + grpc-xds + + + io.grpc + grpc-services + + + com.google.re2j + re2j + + + org.bouncycastle + bcprov-jdk15on + + + io.opencensus + opencensus-proto + + + + + org.threeten + threetenbp + + + org.json + json + + + com.google.auth + google-auth-library-credentials + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-context + + + org.apache.arrow + arrow-vector + + + + + io.opentelemetry + opentelemetry-sdk + test + + + io.opentelemetry + opentelemetry-sdk-common + test + + + io.opentelemetry + opentelemetry-sdk-trace + test + + + junit + junit + test + + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + + + org.junit.vintage + junit-vintage-engine + test + + + com.google.http-client + google-http-client + 1.47.1 + test + + + com.google.truth + truth + 1.4.4 + test + + + org.checkerframework + checker-qual + + + + + org.mockito + mockito-core + 3.12.4 + test + + + org.apache.avro + avro + 1.11.4 + test + + + org.apache.arrow + arrow-memory-core + test + + + com.google.protobuf + protobuf-java-util + test + + + com.google.cloud + google-cloud-bigquery + test + + + com.google.auth + google-auth-library-oauth2-http + test + + + com.google.code.findbugs + jsr305 + + + com.google.errorprone + error_prone_annotations + + + com.google.cloud + google-cloud-core + runtime + + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta + test + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha + test + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1 + + + + + com.google.api + gax-grpc + testlib + test + + + + + + arrow-config + + [9,) + + + + + org.apache.maven.plugins + maven-compiler-plugin + + UTF-8 + true + + -J--add-opens=java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED + -J--add-opens=java.base/java.nio=java-base,ALL-UNNAMED + + + + + + + + + customNative + + + + org.opentest4j + opentest4j + ${opentest4j.version} + + + org.junit.jupiter + junit-jupiter-engine + + + org.junit.vintage + junit-vintage-engine + ${junit-vintage-engine.version} + + + + + + org.apache.maven.plugins + maven-surefire-plugin + ${surefire.version} + + + org.junit.jupiter + junit-jupiter-engine + ${junit-vintage-engine.version} + + + org.junit.vintage + junit-vintage-engine + ${junit-vintage-engine.version} + + + + + + **/ITBigQueryWrite*RetryTest.java + + + **/IT*.java + + **/*ClientTest.java + + + + + org.graalvm.buildtools + 
native-maven-plugin + ${native-maven-plugin.version} + true + + + test-native + + test + + test + + + + + --no-fallback + --no-server + + + + + + + + java17 + + [17,) + + + !jvm + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + --add-opens=java.base/java.nio=ALL-UNNAMED + + + + + + + diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util/Errors.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util/Errors.java new file mode 100644 index 000000000000..85da2e3b5b2b --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util/Errors.java @@ -0,0 +1,85 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.util; + +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.protobuf.ProtoUtils; +import java.time.Duration; + +/** Static utility methods for working with Errors returned from the service. */ +public class Errors { + private Errors() {} + + public static class IsRetryableStatusResult { + public boolean isRetryable = false; + public Duration retryDelay = null; + } + + private static final Metadata.Key KEY_RETRY_INFO = + ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + + /** + * Returns true iff the Status indicates an error that is retryable. + * + *

Generally, internal errors are not considered retryable; however, there are certain transient
+   * network issues that appear as internal but are in fact retryable.
+   *
+   *

Resource exhausted errors are only considered retryable if metadata contains a serialized
+   * RetryInfo object.
+   */
+  public static IsRetryableStatusResult isRetryableStatus(Status status, Metadata metadata) {
+    IsRetryableStatusResult result = new IsRetryableStatusResult();
+
+    result.isRetryable = isRetryableInternalStatus(status);
+    if (!result.isRetryable
+        && status.getCode() == Status.Code.RESOURCE_EXHAUSTED
+        && metadata != null
+        && metadata.containsKey(KEY_RETRY_INFO)) {
+      RetryInfo retryInfo = metadata.get(KEY_RETRY_INFO);
+      if (retryInfo.hasRetryDelay()) {
+        result.isRetryable = true;
+        result.retryDelay =
+            Duration.ofSeconds(
+                retryInfo.getRetryDelay().getSeconds(), retryInfo.getRetryDelay().getNanos());
+      }
+    }
+
+    return result;
+  }
+
+  /**
+   * Returns true iff the Status indicates an internal error that is retryable.
+   *
+   *

Generally, internal errors are not considered retryable; however, there are certain transient
+   * network issues that appear as internal but are in fact retryable.
+   */
+  public static boolean isRetryableInternalStatus(Status status) {
+    String description = status.getDescription();
+    return status.getCode() == Status.Code.INTERNAL
+        && description != null
+        && (description.contains("Received unexpected EOS ")
+            || description.contains(" Rst ")
+            || description.contains("Rst Stream")
+            || description.contains("RST_STREAM")
+            || description.contains(
+                "INTERNAL: A retriable error could not be retried due to Extensible Stubs memory"
+                    + " limits for streams")
+            || description.contains("Connection closed with unknown cause")
+            || description.contains("HTTP/2 error code: INTERNAL_ERROR"));
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util/TimeConversionUtils.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util/TimeConversionUtils.java
new file mode 100644
index 000000000000..56f719cc4309
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/util/TimeConversionUtils.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.util;
+
+import com.google.api.core.InternalApi;
+
+/**
+ * Convenience methods for conversions between {@link java.time} and {@link org.threeten.bp}
+ * objects. These methods will be kept until the linked issue is solved.
+ */ +@InternalApi("https://github.com/googleapis/sdk-platform-java/issues/3412") +public class TimeConversionUtils { + public static java.time.LocalDateTime toJavaTimeLocalDateTime( + org.threeten.bp.LocalDateTime result) { + return java.time.LocalDateTime.of( + result.getYear(), + java.time.Month.of(result.getMonth().getValue()), + result.getDayOfMonth(), + result.getHour(), + result.getMinute(), + result.getSecond(), + result.getNano()); + } + + public static org.threeten.bp.LocalDateTime toThreetenLocalDateTime( + java.time.LocalDateTime result) { + return org.threeten.bp.LocalDateTime.of( + result.getYear(), + org.threeten.bp.Month.of(result.getMonth().getValue()), + result.getDayOfMonth(), + result.getHour(), + result.getMinute(), + result.getSecond(), + result.getNano()); + } + + public static java.time.LocalTime toJavaTimeLocalTime(org.threeten.bp.LocalTime result) { + return java.time.LocalTime.of( + result.getHour(), result.getMinute(), result.getSecond(), result.getNano()); + } + + public static org.threeten.bp.LocalTime toThreetenLocalTime(java.time.LocalTime result) { + return org.threeten.bp.LocalTime.of( + result.getHour(), result.getMinute(), result.getSecond(), result.getNano()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/AppendFormats.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/AppendFormats.java new file mode 100644 index 000000000000..3a415875f517 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/AppendFormats.java @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.auto.value.AutoValue; +import javax.annotation.Nullable; + +/** Adapter class for data formats used in the AppendRows. */ +final class AppendFormats { + /** Enum for the data format used in the AppendRows. */ + enum DataFormat { + UNKNOWN, + PROTO, + ARROW + } + + /** Container class for the schema used in the AppendRows request. */ + @AutoValue + abstract static class AppendRowsSchema { + abstract DataFormat format(); + + @Nullable + abstract ProtoSchema protoSchema(); + + @Nullable + abstract ArrowSchema arrowSchema(); + + static AppendRowsSchema of(ProtoSchema protoSchema) { + return new AutoValue_AppendFormats_AppendRowsSchema( + DataFormat.PROTO, protoSchema, /* arrowSchema= */ null); + } + + static AppendRowsSchema of(ArrowSchema arrowSchema) { + return new AutoValue_AppendFormats_AppendRowsSchema( + DataFormat.ARROW, /* protoSchema= */ null, arrowSchema); + } + } + + /** Container class for the data used in the AppendRows request. */ + @AutoValue + abstract static class AppendRowsData { + abstract DataFormat format(); + + @Nullable + abstract ProtoRows protoRows(); + + @Nullable + abstract ArrowRecordBatch arrowRecordBatch(); + + // Row count for arrowRecordBatch. 
It defaults to -1 if not set.
+    abstract long recordBatchRowCount();
+
+    static AppendRowsData of(ProtoRows protoRows) {
+      return new AutoValue_AppendFormats_AppendRowsData(
+          DataFormat.PROTO, protoRows, /* arrowRecordBatch= */ null, /* recordBatchRowCount= */ -1);
+    }
+
+    static AppendRowsData of(ArrowRecordBatch arrowRecordBatch) {
+      return new AutoValue_AppendFormats_AppendRowsData(
+          DataFormat.ARROW, /* protoRows= */ null, arrowRecordBatch, /* recordBatchRowCount= */ -1);
+    }
+
+    static AppendRowsData of(ArrowRecordBatch arrowRecordBatch, long recordBatchRowCount) {
+      return new AutoValue_AppendFormats_AppendRowsData(
+          DataFormat.ARROW, /* protoRows= */ null, arrowRecordBatch, recordBatchRowCount);
+    }
+  }
+
+  private AppendFormats() {}
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java
new file mode 100644
index 000000000000..6a62f454e373
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.protobuf.DescriptorProtos.DescriptorProto;
+import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
+import com.google.protobuf.DescriptorProtos.FieldOptions;
+import com.google.protobuf.DescriptorProtos.FileDescriptorProto;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Descriptors.Descriptor;
+import com.google.protobuf.Descriptors.FileDescriptor;
+import com.google.protobuf.Message;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+
+/**
+ * Converts a BQ table schema to a protobuf descriptor. All field names will be converted to
+ * lowercase when constructing the protobuf descriptor. The mappings for field types and field
+ * modes are shown in the ImmutableMaps below.
+ */ +public class BQTableSchemaToProtoDescriptor { + + private static final Logger LOG = + Logger.getLogger(BQTableSchemaToProtoDescriptor.class.getName()); + + private static Map DEFAULT_BQ_TABLE_SCHEMA_MODE_MAP = + ImmutableMap.of( + TableFieldSchema.Mode.NULLABLE, FieldDescriptorProto.Label.LABEL_OPTIONAL, + TableFieldSchema.Mode.REPEATED, FieldDescriptorProto.Label.LABEL_REPEATED, + TableFieldSchema.Mode.REQUIRED, FieldDescriptorProto.Label.LABEL_REQUIRED); + + private static Map + DEFAULT_BQ_TABLE_SCHEMA_TYPE_MAP = + new ImmutableMap.Builder() + .put(TableFieldSchema.Type.BOOL, FieldDescriptorProto.Type.TYPE_BOOL) + .put(TableFieldSchema.Type.BYTES, FieldDescriptorProto.Type.TYPE_BYTES) + .put(TableFieldSchema.Type.DATE, FieldDescriptorProto.Type.TYPE_INT32) + .put(TableFieldSchema.Type.DATETIME, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.DOUBLE, FieldDescriptorProto.Type.TYPE_DOUBLE) + .put(TableFieldSchema.Type.GEOGRAPHY, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.INT64, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.NUMERIC, FieldDescriptorProto.Type.TYPE_BYTES) + .put(TableFieldSchema.Type.BIGNUMERIC, FieldDescriptorProto.Type.TYPE_BYTES) + .put(TableFieldSchema.Type.STRING, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.STRUCT, FieldDescriptorProto.Type.TYPE_MESSAGE) + .put(TableFieldSchema.Type.TIME, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.TIMESTAMP, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.JSON, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.INTERVAL, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.RANGE, FieldDescriptorProto.Type.TYPE_MESSAGE) + .build(); + + /** + * Converts TableFieldSchema to a Descriptors.Descriptor object. + * + * @param BQTableSchema + * @throws Descriptors.DescriptorValidationException + */ + public static Descriptor convertBQTableSchemaToProtoDescriptor(TableSchema BQTableSchema) + throws Descriptors.DescriptorValidationException { + Preconditions.checkNotNull(BQTableSchema, "BQTableSchema is null."); + return convertBQTableSchemaToProtoDescriptorImpl( + BQTableSchema, "root", new HashMap, Descriptor>()); + } + + /** + * Converts a TableFieldSchema to a Descriptors.Descriptor object. + * + * @param BQTableSchema + * @param scope Keeps track of current scope to prevent repeated naming while constructing + * descriptor. + * @param dependencyMap Stores already constructed descriptors to prevent reconstruction + * @throws Descriptors.DescriptorValidationException + */ + private static Descriptor convertBQTableSchemaToProtoDescriptorImpl( + TableSchema BQTableSchema, + String scope, + HashMap, Descriptor> dependencyMap) + throws Descriptors.DescriptorValidationException, IllegalArgumentException { + List dependenciesList = new ArrayList(); + List fields = new ArrayList(); + int index = 1; + for (TableFieldSchema BQTableField : BQTableSchema.getFieldsList()) { + String scopeName = + BigQuerySchemaUtil.isProtoCompatible(BQTableField.getName()) + ? 
BQTableField.getName()
+              : BigQuerySchemaUtil.generatePlaceholderFieldName(BQTableField.getName());
+      String currentScope = scope + "__" + scopeName;
+      switch (BQTableField.getType()) {
+        case STRUCT:
+          ImmutableList<TableFieldSchema> fieldList =
+              ImmutableList.copyOf(BQTableField.getFieldsList());
+          if (dependencyMap.containsKey(fieldList)) {
+            Descriptor descriptor = dependencyMap.get(fieldList);
+            dependenciesList.add(descriptor.getFile());
+            fields.add(
+                convertBQTableFieldToProtoField(BQTableField, index++, descriptor.getName()));
+          } else {
+            Descriptor descriptor =
+                convertBQTableSchemaToProtoDescriptorImpl(
+                    TableSchema.newBuilder().addAllFields(fieldList).build(),
+                    currentScope,
+                    dependencyMap);
+            dependenciesList.add(descriptor.getFile());
+            dependencyMap.put(fieldList, descriptor);
+            fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope));
+          }
+          break;
+        case RANGE:
+          switch (BQTableField.getRangeElementType().getType()) {
+            case DATE:
+            case DATETIME:
+            case TIMESTAMP:
+              break;
+            default:
+              throw new IllegalArgumentException(
+                  String.format(
+                      "Error: %s of type RANGE requires range element type (DATE, DATETIME,"
+                          + " TIMESTAMP)",
+                      currentScope));
+          }
+          // For the RANGE type, explicitly add the fields start and end of the same
+          // FieldElementType, as they are not explicitly defined in the TableSchema.
+          ImmutableList<TableFieldSchema> rangeFields =
+              ImmutableList.of(
+                  TableFieldSchema.newBuilder()
+                      .setType(BQTableField.getRangeElementType().getType())
+                      .setName("start")
+                      .setMode(Mode.NULLABLE)
+                      .setTimestampPrecision(BQTableField.getTimestampPrecision())
+                      .build(),
+                  TableFieldSchema.newBuilder()
+                      .setType(BQTableField.getRangeElementType().getType())
+                      .setName("end")
+                      .setMode(Mode.NULLABLE)
+                      .setTimestampPrecision(BQTableField.getTimestampPrecision())
+                      .build());
+
+          if (dependencyMap.containsKey(rangeFields)) {
+            Descriptor descriptor = dependencyMap.get(rangeFields);
+            dependenciesList.add(descriptor.getFile());
+            fields.add(
+                convertBQTableFieldToProtoField(BQTableField, index++, descriptor.getName()));
+          } else {
+            Descriptor descriptor =
+                convertBQTableSchemaToProtoDescriptorImpl(
+                    TableSchema.newBuilder().addAllFields(rangeFields).build(),
+                    currentScope,
+                    dependencyMap);
+            dependenciesList.add(descriptor.getFile());
+            dependencyMap.put(rangeFields, descriptor);
+            fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope));
+          }
+          break;
+        default:
+          fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope));
+          break;
+      }
+    }
+    FileDescriptor[] dependenciesArray = new FileDescriptor[dependenciesList.size()];
+    dependenciesArray = dependenciesList.toArray(dependenciesArray);
+    DescriptorProto descriptorProto =
+        DescriptorProto.newBuilder().setName(scope).addAllField(fields).build();
+    FileDescriptorProto fileDescriptorProto =
+        FileDescriptorProto.newBuilder().addMessageType(descriptorProto).build();
+    FileDescriptor fileDescriptor =
+        FileDescriptor.buildFrom(fileDescriptorProto, dependenciesArray);
+    Descriptor descriptor = fileDescriptor.findMessageTypeByName(scope);
+    return descriptor;
+  }
+
+  /**
+   * Converts a BQTableField to a ProtoField.
+   *
+   * @param BQTableField BQ Field used to construct a FieldDescriptorProto
+   * @param index Index for protobuf fields.
+   * @param scope used to name descriptors
+   */
+  static FieldDescriptorProto convertBQTableFieldToProtoField(
+      TableFieldSchema BQTableField, int index, String scope) {
+    TableFieldSchema.Mode mode = BQTableField.getMode();
+    String fieldName = BQTableField.getName().toLowerCase();
+
+    FieldDescriptorProto.Builder fieldDescriptor =
+        FieldDescriptorProto.newBuilder()
+            .setName(fieldName)
+            .setNumber(index)
+            .setLabel((FieldDescriptorProto.Label) DEFAULT_BQ_TABLE_SCHEMA_MODE_MAP.get(mode));
+
+    switch (BQTableField.getType()) {
+      case STRUCT:
+        fieldDescriptor.setTypeName(scope);
+        break;
+      case RANGE:
+        fieldDescriptor.setType(
+            (FieldDescriptorProto.Type)
+                DEFAULT_BQ_TABLE_SCHEMA_TYPE_MAP.get(BQTableField.getType()));
+        fieldDescriptor.setTypeName(scope);
+        break;
+      case TIMESTAMP:
+        // Can map to either int64 or string based on the BQ field's timestamp precision.
+        // Default: microsecond (6) maps to int64, and picosecond (12) maps to string.
+        long timestampPrecision = BQTableField.getTimestampPrecision().getValue();
+        if (timestampPrecision == 12L) {
+          fieldDescriptor.setType(
+              (FieldDescriptorProto.Type) FieldDescriptorProto.Type.TYPE_STRING);
+          break;
+        }
+        // A precision other than 6 or 12 should never happen, as it would be a server
+        // response issue. If it does, warn the user and use INT64, since the default is
+        // microsecond precision.
+        if (timestampPrecision != 6L && timestampPrecision != 0L) {
+          LOG.warning(
+              "BigQuery Timestamp field "
+                  + BQTableField.getName()
+                  + " has timestamp precision that is not 6 or 12. Defaulting to microsecond"
+                  + " precision and mapping to INT64 protobuf type.");
+        }
+        // If the timestampPrecision value comes back as a null result from the server,
+        // timestampPrecision has a value of 0L. Use INT64 to map to the type used for the
+        // default precision (microsecond).
+        fieldDescriptor.setType((FieldDescriptorProto.Type) FieldDescriptorProto.Type.TYPE_INT64);
+        break;
+      default:
+        fieldDescriptor.setType(
+            (FieldDescriptorProto.Type)
+                DEFAULT_BQ_TABLE_SCHEMA_TYPE_MAP.get(BQTableField.getType()));
+        break;
+    }
+
+    // Sets the columnName annotation when the field name is not proto compatible.
+    if (!BigQuerySchemaUtil.isProtoCompatible(fieldName)) {
+      fieldDescriptor.setName(BigQuerySchemaUtil.generatePlaceholderFieldName(fieldName));
+
+      // The following is a workaround (instead of setting FieldOptions directly) for
+      // FieldOptions.Builder changing from GeneratedMessageV3 in protobuf 3.25 to
+      // GeneratedMessage in 4.28, so that this code does not depend on either base type.
+      Message.Builder fieldOptionBuilder = FieldOptions.newBuilder();
+      fieldOptionBuilder.setField(AnnotationsProto.columnName.getDescriptor(), fieldName);
+      fieldDescriptor.setOptions((FieldOptions) fieldOptionBuilder.build());
+    }
+    return fieldDescriptor.build();
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java
new file mode 100644
index 000000000000..05f482e8bb48
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java
@@ -0,0 +1,558 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryReadStub; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryReadStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery Read API. + * + *

The Read API can be used to read data from BigQuery. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 940837515;
+ *   ReadSession response =
+ *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the BaseBigQueryReadClient object to clean up resources + * such as threads. In the example above, try-with-resources is used, which automatically calls + * close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
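+ * Where try-with-resources is not practical, the client can also be shut down manually. The
+ * following is an editorial sketch rather than generated sample code; shutdown() and
+ * awaitTermination() come from the BackgroundResource interface this class implements:
+ *
+ * {@code
+ * BaseBigQueryReadClient client = BaseBigQueryReadClient.create();
+ * try {
+ *   // ... issue read calls ...
+ * } finally {
+ *   client.shutdown();
+ *   client.awaitTermination(30, TimeUnit.SECONDS);
+ * }
+ * }
+ *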
Methods
Method | Description | Method Variants

CreateReadSession

Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. + *

A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read. + *

Data is assigned to each stream such that roughly the same number of rows can be read from each stream. Because the server-side unit for assigning data is collections of rows, the API does not guarantee that each stream will return the same number of rows. Additionally, the limits are enforced based on the number of pre-filtered rows, so some filters can lead to lopsided assignments.
+ *

Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createReadSession(CreateReadSessionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createReadSession(ProjectName parent, ReadSession readSession, int maxStreamCount) + *

  • createReadSession(String parent, ReadSession readSession, int maxStreamCount) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createReadSessionCallable() + *

+ *

ReadRows

Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 128 MB per response; read requests which attempt to read individual rows larger than 128 MB will fail. + *

Each request also returns a set of stream statistics reflecting the current state of the stream.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • readRowsCallable() + *

+ *

SplitReadStream

Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are referred to as the primary and the residual streams of the split. The original `ReadStream` can still be read from in the same manner as before. Both of the returned `ReadStream` objects can also be read from, and the rows returned by both child streams will be the same as the rows read from the original stream. + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. Concretely, it is guaranteed that for streams original, primary, and residual, that original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read to completion.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • splitReadStream(SplitReadStreamRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • splitReadStreamCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *
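+ * For instance, rows can be streamed with the callable variant of ReadRows. This is an
+ * editorial sketch rather than generated sample code, and the stream name is a placeholder:
+ *
+ * {@code
+ * ReadRowsRequest request =
+ *     ReadRowsRequest.newBuilder().setReadStream("<read-stream-name>").build();
+ * ServerStream<ReadRowsResponse> stream =
+ *     baseBigQueryReadClient.readRowsCallable().call(request);
+ * for (ReadRowsResponse response : stream) {
+ *   // Each response carries up to 128 MB of rows plus current stream statistics.
+ * }
+ * }
+ *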

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *
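+ * For example (a brief editorial sketch; "my-project" is a placeholder):
+ *
+ * {@code
+ * String formatted = ProjectName.of("my-project").toString(); // "projects/my-project"
+ * ProjectName parsed = ProjectName.parse(formatted);
+ * String project = parsed.getProject(); // "my-project"
+ * }
+ *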

This class can be customized by passing in a custom instance of BaseBigQueryReadSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryReadSettings baseBigQueryReadSettings =
+ *     BaseBigQueryReadSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BaseBigQueryReadClient baseBigQueryReadClient =
+ *     BaseBigQueryReadClient.create(baseBigQueryReadSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryReadSettings baseBigQueryReadSettings =
+ *     BaseBigQueryReadSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BaseBigQueryReadClient baseBigQueryReadClient =
+ *     BaseBigQueryReadClient.create(baseBigQueryReadSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class BaseBigQueryReadClient implements BackgroundResource { + private final BaseBigQueryReadSettings settings; + private final BigQueryReadStub stub; + + /** Constructs an instance of BaseBigQueryReadClient with default settings. */ + public static final BaseBigQueryReadClient create() throws IOException { + return create(BaseBigQueryReadSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BaseBigQueryReadClient create(BaseBigQueryReadSettings settings) + throws IOException { + return new BaseBigQueryReadClient(settings); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given stub for making calls. This + * is for advanced usage - prefer using create(BaseBigQueryReadSettings). + */ + public static final BaseBigQueryReadClient create(BigQueryReadStub stub) { + return new BaseBigQueryReadClient(stub); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BaseBigQueryReadClient(BaseBigQueryReadSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryReadStubSettings) settings.getStubSettings()).createStub(); + } + + protected BaseBigQueryReadClient(BigQueryReadStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BaseBigQueryReadSettings getSettings() { + return settings; + } + + public BigQueryReadStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from
+   * each stream. Because the server-side unit for assigning data is collections of rows, the API
+   * does not guarantee that each stream will return the same number of rows. Additionally, the
+   * limits are enforced based on the number of pre-filtered rows, so some filters can lead to
+   * lopsided assignments.
+   *
+   *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 940837515;
+   *   ReadSession response =
+   *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * }
+ * + * @param parent Required. The request project that owns the session, in the form of + * `projects/{project_id}`. + * @param readSession Required. Session to be created. + * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide + * a value of streams so as to produce reasonable throughput. Must be non-negative. The number + * of streams may be lower than the requested number, depending on the amount parallelism that + * is reasonable for the table. There is a default system max limit of 1,000. + *

This must be greater than or equal to preferred_min_stream_count. Typically, clients + * should either leave this unset to let the system determine an upper bound OR set this to a + * size for the maximum "units of work" it can gracefully handle. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + ProjectName parent, ReadSession readSession, int maxStreamCount) { + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 940837515;
+   *   ReadSession response =
+   *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * }
+ * + * @param parent Required. The request project that owns the session, in the form of + * `projects/{project_id}`. + * @param readSession Required. Session to be created. + * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide + * a number of streams so as to produce reasonable throughput. Must be non-negative. The number + * of streams may be lower than the requested number, depending on the amount of parallelism that + * is reasonable for the table. There is a default system max limit of 1,000. + *

This must be greater than or equal to preferred_min_stream_count. Typically, clients + * should either leave this unset to let the system determine an upper bound OR set this to a + * size for the maximum "units of work" it can gracefully handle. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + String parent, ReadSession readSession, int maxStreamCount) { + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request =
+   *       CreateReadSessionRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setReadSession(ReadSession.newBuilder().build())
+   *           .setMaxStreamCount(940837515)
+   *           .setPreferredMinStreamCount(-1905507237)
+   *           .build();
+   *   ReadSession response = baseBigQueryReadClient.createReadSession(request);
+   * }
+   * }
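Editor's note: as a follow-up to the request-object sample, the two stream-count knobs read more easily with plausible values. This sketch assumes a `session` built elsewhere and a hypothetical project id:

```java
// Let the server choose a stream count in a sensible band: at least ~4
// (preferred minimum), never more than 16 (hard cap for this caller).
CreateReadSessionRequest request =
    CreateReadSessionRequest.newBuilder()
        .setParent(ProjectName.of("my-project").toString()) // hypothetical project
        .setReadSession(session)
        .setPreferredMinStreamCount(4)
        .setMaxStreamCount(16)
        .build();
ReadSession response = baseBigQueryReadClient.createReadSession(request);
```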
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession(CreateReadSessionRequest request) { + return createReadSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request =
+   *       CreateReadSessionRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setReadSession(ReadSession.newBuilder().build())
+   *           .setMaxStreamCount(940837515)
+   *           .setPreferredMinStreamCount(-1905507237)
+   *           .build();
+   *   ApiFuture<ReadSession> future =
+   *       baseBigQueryReadClient.createReadSessionCallable().futureCall(request);
+   *   // Do something.
+   *   ReadSession response = future.get();
+   * }
+   * }
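Editor's note: the future-based sample blocks on `future.get()`; gax futures also compose with a callback. A hedged sketch using `com.google.api.core.ApiFutures` and Guava's `MoreExecutors` (both are existing APIs, but wiring them this way is illustrative, not part of this diff):

```java
ApiFuture<ReadSession> future =
    baseBigQueryReadClient.createReadSessionCallable().futureCall(request);
ApiFutures.addCallback(
    future,
    new ApiFutureCallback<ReadSession>() {
      @Override
      public void onSuccess(ReadSession session) {
        // Hand the session's streams to reader threads.
      }

      @Override
      public void onFailure(Throwable t) {
        // Surface the ApiException to the caller.
      }
    },
    MoreExecutors.directExecutor());
```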
+ */ + public final UnaryCallable<CreateReadSessionRequest, ReadSession> createReadSessionCallable() { + return stub.createReadSessionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Reads rows from the stream in the format prescribed by the ReadSession. Each response contains + * one or more table rows, up to a maximum of 128 MB per response; read requests which attempt to + * read individual rows larger than 128 MB will fail. + + *

Each request also returns a set of stream statistics reflecting the current state of the + * stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   ReadRowsRequest request =
+   *       ReadRowsRequest.newBuilder()
+   *           .setReadStream(
+   *               ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString())
+   *           .setOffset(-1019779949)
+   *           .build();
+   *   ServerStream<ReadRowsResponse> stream =
+   *       baseBigQueryReadClient.readRowsCallable().call(request);
+   *   for (ReadRowsResponse response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
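Editor's note: because each `ReadRowsResponse` carries stream statistics (as the javadoc above notes), a consumer can track progress while draining the server stream. A sketch, assuming `request` targets a valid stream:

```java
long rows = 0;
for (ReadRowsResponse response : baseBigQueryReadClient.readRowsCallable().call(request)) {
  rows += response.getRowCount();
  // Fraction of the stream consumed as of the end of this response.
  double progress = response.getStats().getProgress().getAtResponseEnd();
  System.out.printf("rows=%d progress=%.2f%n", rows, progress);
}
```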
+ */ + public final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> readRowsCallable() { + return stub.readRowsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that for streams original, primary, and residual, that + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request =
+   *       SplitReadStreamRequest.newBuilder()
+   *           .setName(
+   *               ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString())
+   *           .setFraction(-1653751294)
+   *           .build();
+   *   SplitReadStreamResponse response = baseBigQueryReadClient.splitReadStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SplitReadStreamResponse splitReadStream(SplitReadStreamRequest request) { + return splitReadStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that for streams original, primary, and residual, that + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request =
+   *       SplitReadStreamRequest.newBuilder()
+   *           .setName(
+   *               ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString())
+   *           .setFraction(-1653751294)
+   *           .build();
+   *   ApiFuture<SplitReadStreamResponse> future =
+   *       baseBigQueryReadClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something.
+   *   SplitReadStreamResponse response = future.get();
+   * }
+   * }
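Editor's note: for the split semantics described above, the response exposes the two halves explicitly; the primary covers the front of the original stream and the remainder the back. A sketch with an assumed `stream` obtained from a prior read session:

```java
SplitReadStreamRequest split =
    SplitReadStreamRequest.newBuilder()
        .setName(stream.getName())
        .setFraction(0.5) // primary keeps roughly the first half of the rows
        .build();
SplitReadStreamResponse parts = baseBigQueryReadClient.splitReadStream(split);
ReadStream primary = parts.getPrimaryStream();
ReadStream residual = parts.getRemainderStream();
```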
+ */ + public final UnaryCallable + splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java new file mode 100644 index 000000000000..a2241743131c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java @@ -0,0 +1,223 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryReadStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BaseBigQueryReadClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createReadSession: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryReadSettings.Builder baseBigQueryReadSettingsBuilder =
+ *     BaseBigQueryReadSettings.newBuilder();
+ * baseBigQueryReadSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryReadSettingsBuilder
+ *             .createReadSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BaseBigQueryReadSettings baseBigQueryReadSettings = baseBigQueryReadSettingsBuilder.build();
+ * }
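Editor's note: the retry sample stops once the settings object is built; the settings only take effect when passed back into the client factory, e.g.:

```java
BaseBigQueryReadClient client = BaseBigQueryReadClient.create(baseBigQueryReadSettings);
```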
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class BaseBigQueryReadSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).readRowsSettings(); + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).splitReadStreamSettings(); + } + + public static final BaseBigQueryReadSettings create(BigQueryReadStubSettings stub) + throws IOException { + return new BaseBigQueryReadSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryReadStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryReadStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryReadStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryReadStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BaseBigQueryReadSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BaseBigQueryReadSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryReadStubSettings.newBuilder(clientContext)); + } + + protected Builder(BaseBigQueryReadSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryReadStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BigQueryReadStubSettings.newBuilder()); + } + + public BigQueryReadStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryReadStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return getStubSettingsBuilder().createReadSessionSettings(); + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return getStubSettingsBuilder().readRowsSettings(); + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return getStubSettingsBuilder().splitReadStreamSettings(); + } + + @Override + public BaseBigQueryReadSettings build() throws IOException { + return new BaseBigQueryReadSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigDecimalByteStringEncoder.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigDecimalByteStringEncoder.java new file mode 100644 index 000000000000..1beab32e95b8 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigDecimalByteStringEncoder.java @@ -0,0 +1,112 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This code was ported from ZetaSQL and can be found here: + * https://github.com/google/zetasql/blob/c55f967a5ae35b476437210c529691d8a73f5507/java/com/google/zetasql/Value.java + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.common.primitives.Bytes; +import com.google.protobuf.ByteString; +import java.math.BigDecimal; +import java.math.BigInteger; + +public class BigDecimalByteStringEncoder { + private static int NUMERIC_SCALE = 9; + private static final BigDecimal MAX_NUMERIC_VALUE = + new BigDecimal("99999999999999999999999999999.999999999"); + private static final BigDecimal MIN_NUMERIC_VALUE = + new BigDecimal("-99999999999999999999999999999.999999999"); + + // Number of digits after the decimal point supported by the BIGNUMERIC data type. + private static final int BIGNUMERIC_SCALE = 38; + // Maximum and minimum allowed values for the BIGNUMERIC data type. 
+ private static final BigDecimal MAX_BIGNUMERIC_VALUE = + new BigDecimal( + "578960446186580977117854925043439539266.34992332820282019728792003956564819967"); + private static final BigDecimal MIN_BIGNUMERIC_VALUE = + new BigDecimal( + "-578960446186580977117854925043439539266.34992332820282019728792003956564819968"); + + public static ByteString encodeToNumericByteString(BigDecimal bigDecimal) { + ByteString byteString = + serializeBigDecimal( + bigDecimal, NUMERIC_SCALE, MAX_NUMERIC_VALUE, MIN_NUMERIC_VALUE, "ByteString"); + return byteString; + } + + public static ByteString encodeToBigNumericByteString(BigDecimal bigDecimal) { + ByteString byteString = + serializeBigDecimal( + bigDecimal, BIGNUMERIC_SCALE, MAX_BIGNUMERIC_VALUE, MIN_BIGNUMERIC_VALUE, "ByteString"); + return byteString; + } + + public static BigDecimal decodeNumericByteString(ByteString byteString) { + BigDecimal bigDecimal = + deserializeBigDecimal( + byteString, NUMERIC_SCALE, MAX_NUMERIC_VALUE, MIN_NUMERIC_VALUE, "BigDecimal"); + return bigDecimal; + } + + public static BigDecimal decodeBigNumericByteString(ByteString byteString) { + BigDecimal bigDecimal = + deserializeBigDecimal( + byteString, BIGNUMERIC_SCALE, MAX_BIGNUMERIC_VALUE, MIN_BIGNUMERIC_VALUE, "BigDecimal"); + return bigDecimal; + } + + // Make these private and make public wrapper that internalizes these min/max/scale/type + private static BigDecimal deserializeBigDecimal( + ByteString serializedValue, + int scale, + BigDecimal maxValue, + BigDecimal minValue, + String typeName) { + byte[] bytes = serializedValue.toByteArray(); + // NUMERIC/BIGNUMERIC values are serialized as scaled integers in two's complement form in + // little endian order. BigInteger requires the same encoding but in big endian order, + // therefore we must reverse the bytes that come from the proto. + Bytes.reverse(bytes); + BigInteger scaledValue = new BigInteger(bytes); + BigDecimal decimalValue = new BigDecimal(scaledValue, scale); + if (decimalValue.compareTo(maxValue) > 0 || decimalValue.compareTo(minValue) < 0) { + throw new IllegalArgumentException(typeName + " overflow: " + decimalValue.toPlainString()); + } + return decimalValue; + } + + /** Returns a numeric Value that equals to {@code v}. */ + private static ByteString serializeBigDecimal( + BigDecimal v, int scale, BigDecimal maxValue, BigDecimal minValue, String typeName) { + if (v.scale() > scale) { + throw new IllegalArgumentException( + typeName + " scale cannot exceed " + scale + ": " + v.toPlainString()); + } + if (v.compareTo(maxValue) > 0 || v.compareTo(minValue) < 0) { + throw new IllegalArgumentException(typeName + " overflow: " + v.toPlainString()); + } + byte[] bytes = v.setScale(scale).unscaledValue().toByteArray(); + // NUMERIC/BIGNUMERIC values are serialized as scaled integers in two's complement form in + // little endian + // order. BigInteger requires the same encoding but in big endian order, therefore we must + // reverse the bytes that come from the proto. 
+ Bytes.reverse(bytes); + return ByteString.copyFrom(bytes); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClient.java new file mode 100644 index 000000000000..c44a3ecc81a8 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClient.java @@ -0,0 +1,581 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.stub.EnhancedBigQueryReadStub; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.TracerProvider; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * Service Description: BigQuery Read API. + * + *
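Editor's note, before continuing with BigQueryReadClient: a round-trip sketch for the BigDecimalByteStringEncoder above may help. NUMERIC values travel as scale-9 two's-complement integers in little-endian byte order, so encode and decode are inverse up to scale normalization:

```java
BigDecimal in = new BigDecimal("123.45");
ByteString wire = BigDecimalByteStringEncoder.encodeToNumericByteString(in);
BigDecimal out = BigDecimalByteStringEncoder.decodeNumericByteString(wire);
// out is 123.450000000: numerically equal to the input, normalized to scale 9.
assert out.compareTo(in) == 0;
```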

The Read API can be used to read data from BigQuery. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+ *   String parent = "";
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 0;
+ *   ReadSession response = bigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * 
+ * 
+ * + *

Note: close() needs to be called on the BigQueryReadClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BigQueryReadSettings to + * create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * BigQueryReadSettings bigQueryReadSettings =
+ *     BigQueryReadSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryReadClient bigQueryReadClient =
+ *     BigQueryReadClient.create(bigQueryReadSettings);
+ * 
+ * 
+ * + * To customize the endpoint: + * + *
+ * 
+ * BigQueryReadSettings bigQueryReadSettings =
+ *     BigQueryReadSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryReadClient bigQueryReadClient =
+ *     BigQueryReadClient.create(bigQueryReadSettings);
+ * 
+ * 
+ */ +@BetaApi +public class BigQueryReadClient implements BackgroundResource { + private final BigQueryReadSettings settings; + private final EnhancedBigQueryReadStub stub; + + /** Constructs an instance of BigQueryReadClient with default settings. */ + public static final BigQueryReadClient create() throws IOException { + return create(BigQueryReadSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BigQueryReadClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BigQueryReadClient create(BigQueryReadSettings settings) throws IOException { + return new BigQueryReadClient(settings); + } + + /** + * Constructs an instance of BigQueryReadClient, using the given stub for making calls. This is + * for advanced usage - prefer to use BigQueryReadSettings}. + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final BigQueryReadClient create(EnhancedBigQueryReadStub stub) { + return new BigQueryReadClient(stub); + } + + /** + * Constructs an instance of BigQueryReadClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BigQueryReadClient(BigQueryReadSettings settings) throws IOException { + this.settings = settings; + this.stub = + EnhancedBigQueryReadStub.create( + settings.getTypedStubSettings(), + settings.getReadRowsRetryAttemptListener(), + settings.isOpenTelemetryEnabled(), + settings.getOpenTelemetryTracerProvider()); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected BigQueryReadClient(EnhancedBigQueryReadStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BigQueryReadSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public EnhancedBigQueryReadStub getStub() { + return stub; + } + + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + + *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   String parent = "";
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 0;
+   *   ReadSession response = bigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * 
+ * + * @param parent Required. The request project that owns the session, in the form of + * `projects/{project_id}`. + * @param readSession Required. Session to be created. + * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide + * a number of streams so as to produce reasonable throughput. Must be non-negative. The number + * of streams may be lower than the requested number, depending on the amount of parallelism that + * is reasonable for the table. An error will be returned if the max count is greater than the + * current system max limit of 1,000. + *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + String parent, ReadSession readSession, int maxStreamCount) { + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + + *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder().build();
+   *   ReadSession response = bigQueryReadClient.createReadSession(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession(CreateReadSessionRequest request) { + Span createReadSession = null; + if (settings.isOpenTelemetryEnabled()) { + createReadSession = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.createReadSession") + .setAttribute( + "bq.storage.read_session.request.parent", getFieldAsString(request.getParent())) + .setAttribute( + "bq.storage.read_session.request.max_stream_count", request.getMaxStreamCount()) + .setAttribute( + "bq.storage.read_session.request.preferred_min_stream_count", + request.getPreferredMinStreamCount()) + .setAttribute( + "bq.storage.read_session.request.serialized_size", request.getSerializedSize()) + .setAllAttributes(otelAttributesFrom(request.getReadSession())) + .startSpan(); + } + try (Scope createReadSessionScope = + createReadSession != null ? createReadSession.makeCurrent() : null) { + return createReadSessionCallable().call(request); + } finally { + if (createReadSession != null) { + createReadSession.end(); + } + } + } + + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + + *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder().build();
+   *   ApiFuture<ReadSession> future = bigQueryReadClient.createReadSessionCallable().futureCall(request);
+   *   // Do something
+   *   ReadSession response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable createReadSessionCallable() { + Span createReadSessionCallable = null; + if (settings.isOpenTelemetryEnabled()) { + createReadSessionCallable = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.createReadSessionCallable") + .startSpan(); + } + try (Scope createReadSessionCallableScope = + createReadSessionCallable != null ? createReadSessionCallable.makeCurrent() : null) { + return stub.createReadSessionCallable(); + } finally { + if (createReadSessionCallable != null) { + createReadSessionCallable.end(); + } + } + } + + /** + * Reads rows from the stream in the format prescribed by the ReadSession. Each response contains + * one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to + * read individual rows larger than 100 MiB will fail. + * + *

Each request also returns a set of stream statistics reflecting the current state of the + * stream. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
+   *
+   *   ServerStream<ReadRowsResponse> stream = bigQueryReadClient.readRowsCallable().call(request);
+   *   for (ReadRowsResponse response : stream) {
+   *     // Do something when a response is received
+   *   }
+   * }
+   * 
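Editor's note: the sample above iterates raw responses; decoding them requires the session's schema. A hedged sketch for the Avro path, using org.apache.avro (an external dependency, not part of this diff) and a `session`/`request` created earlier; IOException handling is elided:

```java
Schema schema = new Schema.Parser().parse(session.getAvroSchema().getSchema());
DatumReader<GenericRecord> datumReader = new GenericDatumReader<>(schema);
BinaryDecoder decoder = null;
GenericRecord row = null;
for (ReadRowsResponse response : bigQueryReadClient.readRowsCallable().call(request)) {
  byte[] bytes = response.getAvroRows().getSerializedBinaryRows().toByteArray();
  decoder = DecoderFactory.get().binaryDecoder(bytes, decoder);
  while (!decoder.isEnd()) {
    row = datumReader.read(row, decoder);
    // Process one decoded record.
  }
}
```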
+ */ + public final ServerStreamingCallable readRowsCallable() { + Span readRowsCallable = null; + if (settings.isOpenTelemetryEnabled()) { + readRowsCallable = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.readRowsCallable") + .startSpan(); + } + try (Scope readRowsCallableScope = + readRowsCallable != null ? readRowsCallable.makeCurrent() : null) { + return stub.readRowsCallable(); + } finally { + if (readRowsCallable != null) { + readRowsCallable.end(); + } + } + } + + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that for streams original, primary, and residual, that + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder().build();
+   *   SplitReadStreamResponse response = bigQueryReadClient.splitReadStream(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SplitReadStreamResponse splitReadStream(SplitReadStreamRequest request) { + Span splitReadStream = null; + if (settings.isOpenTelemetryEnabled()) { + splitReadStream = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.splitReadStream") + .setAllAttributes(otelAttributesFrom(request)) + .startSpan(); + } + try (Scope splitReadStreamScope = + splitReadStream != null ? splitReadStream.makeCurrent() : null) { + return splitReadStreamCallable().call(request); + } finally { + if (splitReadStream != null) { + splitReadStream.end(); + } + } + } + + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that for streams original, primary, and residual, that + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder().build();
+   *   ApiFuture<SplitReadStreamResponse> future = bigQueryReadClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something
+   *   SplitReadStreamResponse response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + splitReadStreamCallable() { + Span splitReadStreamCallable = null; + if (settings.isOpenTelemetryEnabled()) { + splitReadStreamCallable = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.splitReadStreamCallable") + .startSpan(); + } + try (Scope readRowsCallableScope = + splitReadStreamCallable != null ? splitReadStreamCallable.makeCurrent() : null) { + return stub.splitReadStreamCallable(); + } finally { + if (splitReadStreamCallable != null) { + splitReadStreamCallable.end(); + } + } + } + + @Override + public final void close() { + Span close = null; + if (settings.isOpenTelemetryEnabled()) { + close = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.close") + .startSpan(); + } + try (Scope closeScope = close != null ? close.makeCurrent() : null) { + stub.close(); + } finally { + if (close != null) { + close.end(); + } + } + } + + @Override + public void shutdown() { + Span shutdown = null; + if (settings.isOpenTelemetryEnabled()) { + shutdown = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.shutdown") + .startSpan(); + } + try (Scope shutdownScope = shutdown != null ? shutdown.makeCurrent() : null) { + stub.shutdown(); + } finally { + if (shutdown != null) { + shutdown.end(); + } + } + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + Span shutdownNow = null; + if (settings.isOpenTelemetryEnabled()) { + shutdownNow = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.shutdownNow") + .startSpan(); + } + try (Scope shutdownNowScope = shutdownNow != null ? shutdownNow.makeCurrent() : null) { + stub.shutdownNow(); + } finally { + if (shutdownNow != null) { + shutdownNow.end(); + } + } + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + Span awaitTermination = null; + if (settings.isOpenTelemetryEnabled()) { + awaitTermination = + settings + .getOpenTelemetryTracer() + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.awaitTermination") + .setAttribute("duration", duration) + .setAttribute("unit", unit.toString()) + .startSpan(); + } + try (Scope awaitTerminationScope = + awaitTermination != null ? awaitTermination.makeCurrent() : null) { + return stub.awaitTermination(duration, unit); + } finally { + if (awaitTermination != null) { + awaitTermination.end(); + } + } + } + + public void disableOpenTelemetryTracing() { + settings.setEnableOpenTelemetryTracing(false, null); + } + + public void enableDefaultOpenTelemetryTracing() { + settings.setEnableOpenTelemetryTracing(true, null); + } + + public void enableOpenTelemetryTracing(TracerProvider tracerProvider) { + settings.setEnableOpenTelemetryTracing(true, tracerProvider); + } + + private static String getFieldAsString(Object field) { + return field == null ? 
"null" : field.toString(); + } + + private Attributes otelAttributesFrom(ReadSession readSession) { + return Attributes.builder() + .put("bq.storage.read_session.name", getFieldAsString(readSession.getName())) + .put( + "bq.storage.read_session.data_format_value", + getFieldAsString(readSession.getDataFormatValue())) + .put( + "bq.storage.read_session.serialized_size", + getFieldAsString(readSession.getSerializedSize())) + .put("bq.storage.read_session.table", getFieldAsString(readSession.getTable())) + .put("bq.storage.read_session.estimated_row_count", readSession.getEstimatedRowCount()) + .put( + "bq.storage.read_session.estimated_total_bytes_scanned", + readSession.getEstimatedTotalBytesScanned()) + .put( + "bq.storage.read_session.estimated_total_physical_bytes", + readSession.getEstimatedTotalPhysicalFileSize()) + .put("bq.storage.read_session.streams_count", readSession.getStreamsCount()) + .put("bq.storage.read_session.trace_id", getFieldAsString(readSession.getTraceId())) + .put("bq.storage.read_session.expire_time", getFieldAsString(readSession.getExpireTime())) + .build(); + } + + private Attributes otelAttributesFrom(SplitReadStreamRequest request) { + return Attributes.builder() + .put("bq.storage.split_read_stream_request.name", getFieldAsString(request.getName())) + .put( + "bq.storage.split_read_stream_request.serialized_size", + getFieldAsString(request.getSerializedSize())) + .put( + "bq.storage.split_read_stream_request.fraction", + getFieldAsString(request.getFraction())) + .build(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadSettings.java new file mode 100644 index 000000000000..8c595117e7e7 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadSettings.java @@ -0,0 +1,285 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.stub.EnhancedBigQueryReadStubSettings; +import io.grpc.Metadata; +import io.grpc.Status; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.api.trace.TracerProvider; +import java.io.IOException; +import java.util.List; + +/** + * Settings class to configure an instance of {@link BigQueryReadClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createReadSession to 30 seconds: + * + *

+ * 
+ * BigQueryReadSettings.Builder bigQueryReadSettingsBuilder =
+ *     BigQueryReadSettings.newBuilder();
+ * bigQueryReadSettingsBuilder.createReadSessionSettings().setRetrySettings(
+ *     bigQueryReadSettingsBuilder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * BigQueryReadSettings bigQueryReadSettings = bigQueryReadSettingsBuilder.build();
+ * 
+ * 
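Editor's note: the builder hooks introduced below (setEnableOpenTelemetryTracing, setOpenTelemetryTracerProvider) are what turn on the span-wrapped call paths seen earlier in BigQueryReadClient. A sketch wiring them to the global provider (any SDK-configured TracerProvider works; using the global one here is only an assumption):

```java
BigQueryReadSettings settings =
    BigQueryReadSettings.newBuilder()
        .setEnableOpenTelemetryTracing(true)
        .setOpenTelemetryTracerProvider(GlobalOpenTelemetry.get().getTracerProvider())
        .build();
BigQueryReadClient client = BigQueryReadClient.create(settings);
```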
+ */ +@BetaApi +public class BigQueryReadSettings extends ClientSettings { + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return getTypedStubSettings().createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return getTypedStubSettings().readRowsSettings(); + } + + public static interface RetryAttemptListener { + public void onRetryAttempt(Status prevStatus, Metadata prevMetadata); + } + + private RetryAttemptListener readRowsRetryAttemptListener = null; + private boolean enableOpenTelemetryTracing = false; + private Tracer openTelemetryTracer = null; + private TracerProvider openTelemetryTracerProvider = null; + + /** + * If a non null readRowsRetryAttemptListener is provided, client will call onRetryAttempt + * function before a failed ReadRows request is retried. This can be used as negative feedback + * mechanism for future decision to split read streams because some retried failures are due to + * resource exhaustion that increased parallelism only makes it worse. + */ + public void setReadRowsRetryAttemptListener(RetryAttemptListener readRowsRetryAttemptListener) { + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + } + + public void setEnableOpenTelemetryTracing( + boolean enableOpenTelemetryTracing, TracerProvider openTelemetryTracerProvider) { + this.enableOpenTelemetryTracing = enableOpenTelemetryTracing; + if (enableOpenTelemetryTracing) { + if (openTelemetryTracerProvider == null) { + this.openTelemetryTracer = + Singletons.getOpenTelemetry() + .getTracerProvider() + .tracerBuilder("com.google.cloud.bigquery.storage.v1.read") + .build(); + } else { + this.openTelemetryTracerProvider = openTelemetryTracerProvider; + this.openTelemetryTracer = + openTelemetryTracerProvider + .tracerBuilder("com.google.cloud.bigquery.storage.v1.read") + .build(); + } + } + } + + public RetryAttemptListener getReadRowsRetryAttemptListener() { + return readRowsRetryAttemptListener; + } + + public boolean isOpenTelemetryEnabled() { + return this.enableOpenTelemetryTracing; + } + + public Tracer getOpenTelemetryTracer() { + return this.openTelemetryTracer; + } + + public TracerProvider getOpenTelemetryTracerProvider() { + return this.openTelemetryTracerProvider; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return getTypedStubSettings().splitReadStreamSettings(); + } + + EnhancedBigQueryReadStubSettings getTypedStubSettings() { + return (EnhancedBigQueryReadStubSettings) getStubSettings(); + } + + public static final BigQueryReadSettings create(EnhancedBigQueryReadStubSettings stub) + throws IOException { + return new BigQueryReadSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return EnhancedBigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return EnhancedBigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. 
+  /** Returns the default service scopes. */
+  public static List<String> getDefaultServiceScopes() {
+    return EnhancedBigQueryReadStubSettings.getDefaultServiceScopes();
+  }
+
+  /** Returns a builder for the default credentials for this service. */
+  public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() {
+    return EnhancedBigQueryReadStubSettings.defaultCredentialsProviderBuilder();
+  }
+
+  /** Returns a builder for the default ChannelProvider for this service. */
+  public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() {
+    return EnhancedBigQueryReadStubSettings.defaultGrpcTransportProviderBuilder();
+  }
+
+  public static TransportChannelProvider defaultTransportChannelProvider() {
+    return EnhancedBigQueryReadStubSettings.defaultTransportChannelProvider();
+  }
+
+  @BetaApi("The surface for customizing headers is not stable yet and may change in the future.")
+  public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() {
+    return EnhancedBigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder() {
+    return Builder.createDefault();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder(ClientContext clientContext) {
+    return new Builder(clientContext);
+  }
+
+  /** Returns a builder containing all the values of this settings class. */
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  protected BigQueryReadSettings(Builder settingsBuilder) throws IOException {
+    super(settingsBuilder);
+  }
+
+  /** Builder for BigQueryReadSettings. */
+  public static class Builder extends ClientSettings.Builder<BigQueryReadSettings, Builder> {
+    protected Builder() throws IOException {
+      this((ClientContext) null);
+    }
+
+    protected Builder(ClientContext clientContext) {
+      super(EnhancedBigQueryReadStubSettings.newBuilder(clientContext));
+    }
+
+    private static Builder createDefault() {
+      return new Builder(EnhancedBigQueryReadStubSettings.newBuilder());
+    }
+
+    protected Builder(BigQueryReadSettings settings) {
+      super(settings.getStubSettings().toBuilder());
+    }
+
+    protected Builder(EnhancedBigQueryReadStubSettings.Builder stubSettings) {
+      super(stubSettings);
+    }
+
+    public EnhancedBigQueryReadStubSettings.Builder getStubSettingsBuilder() {
+      return ((EnhancedBigQueryReadStubSettings.Builder) getStubSettings());
+    }
+
+    // NEXT_MAJOR_VER: remove 'throws Exception'
+    /**
+     * Applies the given settings updater function to all of the unary API methods in this service.
+     *
+     * <p>Note: This method does not support applying settings to streaming methods.
+     */
+    public Builder applyToAllUnaryMethods(
+        ApiFunction<UnaryCallSettings.Builder<?, ?>, Void> settingsUpdater) throws Exception {
+      super.applyToAllUnaryMethods(
+          getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater);
+      return this;
+    }
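+    // Editorial, illustrative sketch (not part of the generated surface): given a
+    // BigQueryReadSettings.Builder named "builder", the updater below sets one total timeout
+    // across createReadSession and splitReadStream in a single pass.
+    //
+    //   builder.applyToAllUnaryMethods(
+    //       callSettings -> {
+    //         callSettings.setRetrySettings(
+    //             callSettings.getRetrySettings().toBuilder()
+    //                 .setTotalTimeout(Duration.ofSeconds(60))
+    //                 .build());
+    //         return null;
+    //       });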

+    private RetryAttemptListener readRowsRetryAttemptListener = null;
+    private boolean enableOpenTelemetryTracing = false;
+    private TracerProvider openTelemetryTracerProvider = null;
+
+    public Builder setReadRowsRetryAttemptListener(
+        RetryAttemptListener readRowsRetryAttemptListener) {
+      this.readRowsRetryAttemptListener = readRowsRetryAttemptListener;
+      return this;
+    }
+
+    public Builder setEnableOpenTelemetryTracing(boolean enableOpenTelemetryTracing) {
+      this.enableOpenTelemetryTracing = enableOpenTelemetryTracing;
+      return this;
+    }
+
+    public Builder setOpenTelemetryTracerProvider(TracerProvider tracerProvider) {
+      this.openTelemetryTracerProvider = tracerProvider;
+      return this;
+    }
+
+    /** Returns the builder for the settings used for calls to createReadSession. */
+    public UnaryCallSettings.Builder<CreateReadSessionRequest, ReadSession>
+        createReadSessionSettings() {
+      return getStubSettingsBuilder().createReadSessionSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to readRows. */
+    public ServerStreamingCallSettings.Builder<ReadRowsRequest, ReadRowsResponse>
+        readRowsSettings() {
+      return getStubSettingsBuilder().readRowsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to splitReadStream. */
+    public UnaryCallSettings.Builder<SplitReadStreamRequest, SplitReadStreamResponse>
+        splitReadStreamSettings() {
+      return getStubSettingsBuilder().splitReadStreamSettings();
+    }
+
+    @Override
+    public BigQueryReadSettings build() throws IOException {
+      BigQueryReadSettings settings = new BigQueryReadSettings(this);
+      settings.setReadRowsRetryAttemptListener(readRowsRetryAttemptListener);
+      settings.setEnableOpenTelemetryTracing(
+          enableOpenTelemetryTracing, openTelemetryTracerProvider);
+      return settings;
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtil.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtil.java
new file mode 100644
index 000000000000..e57daa7ffac6
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtil.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.protobuf.DescriptorProtos.FieldOptions;
+import com.google.protobuf.Descriptors.FieldDescriptor;
+import com.google.protobuf.ExtensionLite;
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
+
+public class BigQuerySchemaUtil {
+
+  private static final String PLACEHOLDER_FIELD_NAME_PREFIX = "col_";
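+
+  // Illustrative example (editorial): a BigQuery column name such as "order-id" is not
+  // proto-compatible because of the dash, so a placeholder is generated instead:
+  //   isProtoCompatible("order-id")            -> false
+  //   generatePlaceholderFieldName("order-id") -> "col_b3JkZXItaWQ"
+  // Base64url output may itself contain '-', hence the trailing replace('-', '_') below.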
+  /**
+   * Checks if the field name is compatible with the proto field naming convention.
+   *
+   * @param fieldName name for the field
+   * @return true if the field name is compatible with the proto naming convention; otherwise,
+   *     returns false.
+   */
+  public static boolean isProtoCompatible(String fieldName) {
+    int length = fieldName.length();
+    if (length < 1) {
+      return false;
+    }
+    char ch = fieldName.charAt(0);
+    // A proto identifier must start with a letter or an underscore.
+    if (!((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '_')) {
+      return false;
+    }
+    for (int i = 1; i < length; i++) {
+      ch = fieldName.charAt(i);
+      // Subsequent characters may also be digits.
+      if (!((ch >= 'a' && ch <= 'z')
+          || (ch >= 'A' && ch <= 'Z')
+          || ch == '_'
+          || (ch >= '0' && ch <= '9'))) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  /**
+   * Generates a placeholder name that consists of a prefix + base64-encoded field name. We
+   * replace all dashes with underscores, as they are not allowed in proto field names.
+   *
+   * @param fieldName name for the field
+   * @return the generated placeholder field name
+   */
+  public static String generatePlaceholderFieldName(String fieldName) {
+    return PLACEHOLDER_FIELD_NAME_PREFIX
+        + Base64.getUrlEncoder()
+            .withoutPadding()
+            .encodeToString(fieldName.getBytes(StandardCharsets.UTF_8))
+            .replace('-', '_');
+  }
+
+  /**
+   * Gets the user-facing field name from the descriptor.
+   *
+   * @param fieldDescriptor descriptor of the field
+   * @return the columnName annotation if present; otherwise, the field name.
+   */
+  public static String getFieldName(FieldDescriptor fieldDescriptor) {
+    return fieldDescriptor
+            .getOptions()
+            .hasExtension((ExtensionLite<FieldOptions, String>) AnnotationsProto.columnName)
+        ? fieldDescriptor
+            .getOptions()
+            .getExtension((ExtensionLite<FieldOptions, String>) AnnotationsProto.columnName)
+        : fieldDescriptor.getName();
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java
new file mode 100644
index 000000000000..916f7ef30c05
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java
@@ -0,0 +1,1014 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.gax.core.BackgroundResource;
+import com.google.api.gax.rpc.BidiStreamingCallable;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStub;
+import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStubSettings;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+/**
+ * Service Description: BigQuery Write API.
+ *

The Write API can be used to write data to BigQuery. + * + *

For supplementary information about the Write API, see: + * https://cloud.google.com/bigquery/docs/write-api + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream writeStream = WriteStream.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+ * }
+ * }
+ * + *

+ * <p>Note: close() needs to be called on the BigQueryWriteClient object to clean up resources such
+ * as threads. In the example above, try-with-resources is used, which automatically calls close().
+ *
+ * <table>
+ *   <caption>Methods</caption>
+ *   <tr>
+ *     <th>Method</th>
+ *     <th>Description</th>
+ *     <th>Method Variants</th>
+ *   </tr>
+ *   <tr>
+ *     <td><p> CreateWriteStream</td>
+ *     <td><p> Creates a write stream to the given table. Additionally, every table has a special
+ *         stream named '_default' to which data can be written. This stream doesn't need to be
+ *         created using CreateWriteStream. It is a stream that can be used simultaneously by any
+ *         number of clients. Data written to this stream is considered committed as soon as an
+ *         acknowledgement is received.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must
+ *           be constructed before the call.</p>
+ *       <ul>
+ *         <li><p> createWriteStream(CreateWriteStreamRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into
+ *           function parameters to enable multiple ways to call the same method.</p>
+ *       <ul>
+ *         <li><p> createWriteStream(TableName parent, WriteStream writeStream)
+ *         <li><p> createWriteStream(String parent, WriteStream writeStream)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *           object, which can be used to initiate calls to the service.</p>
+ *       <ul>
+ *         <li><p> createWriteStreamCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> AppendRows</td>
+ *     <td><p> Appends data to the given stream.
+ *         <p>If `offset` is specified, the `offset` is checked against the end of stream. The
+ *             server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to
+ *             append to an offset beyond the current end of the stream or `ALREADY_EXISTS` if
+ *             user provides an `offset` that has already been written to. User can retry with
+ *             adjusted offset within the same RPC connection. If `offset` is not specified,
+ *             append happens at the end of the stream.
+ *         <p>The response contains an optional offset at which the append happened. No offset
+ *             information will be returned for appends to a default stream.
+ *         <p>Responses are received in the same order in which requests are sent. There will be
+ *             one response for each successfully inserted request. Responses may optionally embed
+ *             error information if the originating AppendRequest was not successfully processed.
+ *         <p>The specifics of when successfully appended data is made visible to the table are
+ *             governed by the type of stream:
+ *         <ul>
+ *           <li>For COMMITTED streams (which includes the default stream), data is visible
+ *               immediately upon successful append.
+ *           <li>For BUFFERED streams, data is made visible via a subsequent `FlushRows` rpc which
+ *               advances a cursor to a newer offset in the stream.
+ *           <li>For PENDING streams, data is not made visible until the stream itself is
+ *               finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+ *               committed via the `BatchCommitWriteStreams` rpc.
+ *         </ul></td>
+ *     <td>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *           object, which can be used to initiate calls to the service.</p>
+ *       <ul>
+ *         <li><p> appendRowsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> GetWriteStream</td>
+ *     <td><p> Gets information about a write stream.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must
+ *           be constructed before the call.</p>
+ *       <ul>
+ *         <li><p> getWriteStream(GetWriteStreamRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into
+ *           function parameters to enable multiple ways to call the same method.</p>
+ *       <ul>
+ *         <li><p> getWriteStream(WriteStreamName name)
+ *         <li><p> getWriteStream(String name)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *           object, which can be used to initiate calls to the service.</p>
+ *       <ul>
+ *         <li><p> getWriteStreamCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> FinalizeWriteStream</td>
+ *     <td><p> Finalize a write stream so that no new data can be appended to the stream. Finalize
+ *         is not supported on the '_default' stream.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must
+ *           be constructed before the call.</p>
+ *       <ul>
+ *         <li><p> finalizeWriteStream(FinalizeWriteStreamRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into
+ *           function parameters to enable multiple ways to call the same method.</p>
+ *       <ul>
+ *         <li><p> finalizeWriteStream(WriteStreamName name)
+ *         <li><p> finalizeWriteStream(String name)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *           object, which can be used to initiate calls to the service.</p>
+ *       <ul>
+ *         <li><p> finalizeWriteStreamCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> BatchCommitWriteStreams</td>
+ *     <td><p> Atomically commits a group of `PENDING` streams that belong to the same `parent`
+ *         table.
+ *         <p>Streams must be finalized before commit and cannot be committed multiple times. Once
+ *             a stream is committed, data in the stream becomes available for read
+ *             operations.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must
+ *           be constructed before the call.</p>
+ *       <ul>
+ *         <li><p> batchCommitWriteStreams(BatchCommitWriteStreamsRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into
+ *           function parameters to enable multiple ways to call the same method.</p>
+ *       <ul>
+ *         <li><p> batchCommitWriteStreams(TableName parent)
+ *         <li><p> batchCommitWriteStreams(String parent)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *           object, which can be used to initiate calls to the service.</p>
+ *       <ul>
+ *         <li><p> batchCommitWriteStreamsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> FlushRows</td>
+ *     <td><p> Flushes rows to a BUFFERED stream.
+ *         <p>If users are appending rows to a BUFFERED stream, a flush operation is required in
+ *             order for the rows to become available for reading. A flush operation flushes up to
+ *             any previously flushed offset in a BUFFERED stream, to the offset specified in the
+ *             request.
+ *         <p>Flush is not supported on the _default stream, since it is not BUFFERED.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must
+ *           be constructed before the call.</p>
+ *       <ul>
+ *         <li><p> flushRows(FlushRowsRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into
+ *           function parameters to enable multiple ways to call the same method.</p>
+ *       <ul>
+ *         <li><p> flushRows(WriteStreamName writeStream)
+ *         <li><p> flushRows(String writeStream)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *           object, which can be used to initiate calls to the service.</p>
+ *       <ul>
+ *         <li><p> flushRowsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ * </table>
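+ *
+ * <p>For orientation, a typical PENDING-stream lifecycle combines several methods from the table
+ * above. The following is an illustrative editorial sketch rather than generated sample code;
+ * bracketed identifiers are placeholders:
+ *
+ * <pre>{@code
+ * try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+ *   TableName table = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream pending =
+ *       client.createWriteStream(
+ *           table, WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build());
+ *   // ... append rows through appendRowsCallable() ...
+ *   client.finalizeWriteStream(pending.getName());
+ *   client.batchCommitWriteStreams(
+ *       BatchCommitWriteStreamsRequest.newBuilder()
+ *           .setParent(table.toString())
+ *           .addWriteStreams(pending.getName())
+ *           .build());
+ * }
+ * }</pre>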

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *
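+ * <p>For example, an illustrative use of the generated WriteStreamName helper (bracketed
+ * identifiers are placeholders):
+ *
+ * <pre>{@code
+ * String name = WriteStreamName.format("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+ * WriteStreamName parsed = WriteStreamName.parse(name);
+ * String project = parsed.getProject();
+ * }</pre>
+ *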

This class can be customized by passing in a custom instance of BigQueryWriteSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class BigQueryWriteClient implements BackgroundResource { + private final BigQueryWriteSettings settings; + private final BigQueryWriteStub stub; + + /** Constructs an instance of BigQueryWriteClient with default settings. */ + public static final BigQueryWriteClient create() throws IOException { + return create(BigQueryWriteSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BigQueryWriteClient create(BigQueryWriteSettings settings) + throws IOException { + return new BigQueryWriteClient(settings); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given stub for making calls. This is + * for advanced usage - prefer using create(BigQueryWriteSettings). + */ + public static final BigQueryWriteClient create(BigQueryWriteStub stub) { + return new BigQueryWriteClient(stub); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BigQueryWriteClient(BigQueryWriteSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryWriteStubSettings) settings.getStubSettings()).createStub(); + } + + protected BigQueryWriteClient(BigQueryWriteStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BigQueryWriteSettings getSettings() { + return settings; + } + + public BigQueryWriteStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+   *   WriteStream writeStream = WriteStream.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which the stream belongs, in the format of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param writeStream Required. Stream to be created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(TableName parent, WriteStream writeStream) { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setWriteStream(writeStream) + .build(); + return createWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
+   *   WriteStream writeStream = WriteStream.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which the stream belongs, in the format of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param writeStream Required. Stream to be created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(String parent, WriteStream writeStream) { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder().setParent(parent).setWriteStream(writeStream).build(); + return createWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request =
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setWriteStream(WriteStream.newBuilder().build())
+   *           .build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(CreateWriteStreamRequest request) { + return createWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special stream named + * '_default' to which data can be written. This stream doesn't need to be created using + * CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. + * Data written to this stream is considered committed as soon as an acknowledgement is received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request =
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setWriteStream(WriteStream.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       bigQueryWriteClient.createWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   WriteStream response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createWriteStreamCallable() { + return stub.createWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Appends data to the given stream. + * + *

If `offset` is specified, the `offset` is checked against the end of stream. The server + * returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset + * beyond the current end of the stream or `ALREADY_EXISTS` if user provides an `offset` that has + * already been written to. User can retry with adjusted offset within the same RPC connection. If + * `offset` is not specified, append happens at the end of the stream. + * + *
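+   * <p>For example, to request an append at an explicit offset (an illustrative editorial
+   * fragment; the stream name and offset value are placeholders):
+   *
+   * <pre>{@code
+   * AppendRowsRequest request =
+   *     AppendRowsRequest.newBuilder()
+   *         .setWriteStream(
+   *             WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *         .setOffset(Int64Value.of(1024))
+   *         .build();
+   * }</pre>
+   *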

The response contains an optional offset at which the append happened. No offset information + * will be returned for appends to a default stream. + * + *

+   * <p>Responses are received in the same order in which requests are sent. There will be one
+   * response for each successfully inserted request. Responses may optionally embed error
+   * information if the originating AppendRequest was not successfully processed.
+   *

The specifics of when successfully appended data is made visible to the table are governed + * by the type of stream: + * + *

    + *
+   * <ul>
+   *   <li>For COMMITTED streams (which includes the default stream), data is visible immediately
+   *       upon successful append.
+   *   <li>For BUFFERED streams, data is made visible via a subsequent `FlushRows` rpc which
+   *       advances a cursor to a newer offset in the stream.
+   *   <li>For PENDING streams, data is not made visible until the stream itself is finalized (via
+   *       the `FinalizeWriteStream` rpc), and the stream is explicitly committed via the
+   *       `BatchCommitWriteStreams` rpc.
+   * </ul>
+   *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BidiStream bidiStream =
+   *       bigQueryWriteClient.appendRowsCallable().call();
+   *   AppendRowsRequest request =
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .setTraceId("traceId-1067401920")
+   *           .putAllMissingValueInterpretations(
+   *               new HashMap())
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (AppendRowsResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ */ + public final BidiStreamingCallable appendRowsCallable() { + return stub.appendRowsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(WriteStreamName name) { + GetWriteStreamRequest request = + GetWriteStreamRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(String name) { + GetWriteStreamRequest request = GetWriteStreamRequest.newBuilder().setName(name).build(); + return getWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request =
+   *       GetWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setView(WriteStreamView.forNumber(0))
+   *           .build();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(GetWriteStreamRequest request) { + return getWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request =
+   *       GetWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setView(WriteStreamView.forNumber(0))
+   *           .build();
+   *   ApiFuture future =
+   *       bigQueryWriteClient.getWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   WriteStream response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getWriteStreamCallable() { + return stub.getWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(WriteStreamName name) { + FinalizeWriteStreamRequest request = + FinalizeWriteStreamRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return finalizeWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(String name) { + FinalizeWriteStreamRequest request = + FinalizeWriteStreamRequest.newBuilder().setName(name).build(); + return finalizeWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request =
+   *       FinalizeWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(FinalizeWriteStreamRequest request) { + return finalizeWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request =
+   *       FinalizeWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       bigQueryWriteClient.finalizeWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   FinalizeWriteStreamResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + finalizeWriteStreamCallable() { + return stub.finalizeWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. + * + *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(parent);
+   * }
+   * }
+ * + * @param parent Required. Parent table that all the streams should belong to, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(TableName parent) { + BatchCommitWriteStreamsRequest request = + BatchCommitWriteStreamsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return batchCommitWriteStreams(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. + * + *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(parent);
+   * }
+   * }
+ * + * @param parent Required. Parent table that all the streams should belong to, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(String parent) { + BatchCommitWriteStreamsRequest request = + BatchCommitWriteStreamsRequest.newBuilder().setParent(parent).build(); + return batchCommitWriteStreams(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. + * + *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request =
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllWriteStreams(new ArrayList())
+   *           .build();
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request) { + return batchCommitWriteStreamsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. + * + *

Streams must be finalized before commit and cannot be committed multiple times. Once a + * stream is committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request =
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllWriteStreams(new ArrayList())
+   *           .build();
+   *   ApiFuture future =
+   *       bigQueryWriteClient.batchCommitWriteStreamsCallable().futureCall(request);
+   *   // Do something.
+   *   BatchCommitWriteStreamsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + batchCommitWriteStreamsCallable() { + return stub.batchCommitWriteStreamsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

+   * <p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName writeStream =
+   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+   * }
+   * }
+ * + * @param writeStream Required. The stream that is the target of the flush operation. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FlushRowsResponse flushRows(WriteStreamName writeStream) { + FlushRowsRequest request = + FlushRowsRequest.newBuilder() + .setWriteStream(writeStream == null ? null : writeStream.toString()) + .build(); + return flushRows(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

+   * <p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String writeStream =
+   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+   * }
+   * }
+ * + * @param writeStream Required. The stream that is the target of the flush operation. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FlushRowsResponse flushRows(String writeStream) { + FlushRowsRequest request = FlushRowsRequest.newBuilder().setWriteStream(writeStream).build(); + return flushRows(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

+   * <p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .build();
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FlushRowsResponse flushRows(FlushRowsRequest request) { + return flushRowsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. + * + *

+   * <p>If users are appending rows to a BUFFERED stream, a flush operation is required in order
+   * for the rows to become available for reading. A flush operation flushes up to any previously
+   * flushed offset in a BUFFERED stream, to the offset specified in the request.
+   *

Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       bigQueryWriteClient.flushRowsCallable().futureCall(request);
+   *   // Do something.
+   *   FlushRowsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable flushRowsCallable() { + return stub.flushRowsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java new file mode 100644 index 000000000000..2f7b439c1a73 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java @@ -0,0 +1,255 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryWriteClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createWriteStream: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder = BigQueryWriteSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * }</pre>
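+ *
+ * <p>(Editor's note: illustrative sketch, not generated text.) The same kind of update can be
+ * applied to every unary method at once through the builder's applyToAllUnaryMethods hook,
+ * which this settings class exposes further below:
+ *
+ * <pre>{@code
+ * bigQueryWriteSettingsBuilder.applyToAllUnaryMethods(
+ *     builder -> {
+ *       // Raise the attempt cap uniformly; all other retry parameters keep their defaults.
+ *       builder.setRetrySettings(
+ *           builder.getRetrySettings().toBuilder().setMaxAttempts(5).build());
+ *       return null;
+ *     });
+ * }</pre>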
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class BigQueryWriteSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createWriteStream. */ + public UnaryCallSettings createWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).createWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to appendRows. */ + public StreamingCallSettings appendRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).appendRowsSettings(); + } + + /** Returns the object with the settings used for calls to getWriteStream. */ + public UnaryCallSettings getWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).getWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings + finalizeWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).finalizeWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings(); + } + + /** Returns the object with the settings used for calls to flushRows. */ + public UnaryCallSettings flushRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).flushRowsSettings(); + } + + public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub) + throws IOException { + return new BigQueryWriteSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryWriteStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryWriteStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryWriteStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryWriteStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryWriteStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryWriteStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryWriteStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BigQueryWriteSettings. */ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryWriteStubSettings.newBuilder(clientContext)); + } + + protected Builder(BigQueryWriteSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryWriteStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BigQueryWriteStubSettings.newBuilder()); + } + + public BigQueryWriteStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryWriteStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createWriteStream. */ + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return getStubSettingsBuilder().createWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to appendRows. */ + public StreamingCallSettings.Builder + appendRowsSettings() { + return getStubSettingsBuilder().appendRowsSettings(); + } + + /** Returns the builder for the settings used for calls to getWriteStream. */ + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getStubSettingsBuilder().getWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return getStubSettingsBuilder().finalizeWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return getStubSettingsBuilder().batchCommitWriteStreamsSettings(); + } + + /** Returns the builder for the settings used for calls to flushRows. */ + public UnaryCallSettings.Builder flushRowsSettings() { + return getStubSettingsBuilder().flushRowsSettings(); + } + + @Override + public BigQueryWriteSettings build() throws IOException { + return new BigQueryWriteSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java new file mode 100644 index 000000000000..e09835da63ba --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java @@ -0,0 +1,361 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toJavaTimeLocalDateTime; +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toJavaTimeLocalTime; +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toThreetenLocalDateTime; +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toThreetenLocalTime; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.core.ObsoleteApi; +import java.time.DateTimeException; +import java.time.temporal.ChronoUnit; + +/** + * Ported from ZetaSQL CivilTimeEncoder Original code can be found at: + * https://github.com/google/zetasql/blob/master/java/com/google/zetasql/CivilTimeEncoder.java + * Encoder for TIME and DATETIME values, according to civil_time encoding. + * + *

+ * <p>The valid range and number of bits required by each date/time field is as the following:
+ *
+ * <table border="1">
+ * <caption>Range and bits for date/time fields</caption>
+ * <tr><th>Field</th><th>Range</th><th>#Bits</th></tr>
+ * <tr><td>Year</td><td>[1, 9999]</td><td>14</td></tr>
+ * <tr><td>Month</td><td>[1, 12]</td><td>4</td></tr>
+ * <tr><td>Day</td><td>[1, 31]</td><td>5</td></tr>
+ * <tr><td>Hour</td><td>[0, 23]</td><td>5</td></tr>
+ * <tr><td>Minute</td><td>[0, 59]*</td><td>6</td></tr>
+ * <tr><td>Second</td><td>[0, 59]*</td><td>6</td></tr>
+ * <tr><td>Micros</td><td>[0, 999999]</td><td>20</td></tr>
+ * <tr><td>Nanos</td><td>[0, 999999999]</td><td>30</td></tr>
+ * </table>
+ *
+ * <p>* Leap second is not supported.
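+ *
+ * <p>(Editor's note: illustrative arithmetic, not generated text.) Summing the table, a
+ * seconds-precision DATETIME takes 14 + 4 + 5 + 5 + 6 + 6 = 40 bits and the microsecond
+ * variant 40 + 20 = 60 bits, so both packed encodings below fit in a single 64-bit long.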

When encoding the TIME or DATETIME into a bit field, larger date/time field is on the more + * significant side. + */ +public final class CivilTimeEncoder { + private static final int NANO_LENGTH = 30; + private static final int MICRO_LENGTH = 20; + + private static final int NANO_SHIFT = 0; + private static final int MICRO_SHIFT = 0; + private static final int SECOND_SHIFT = 0; + private static final int MINUTE_SHIFT = 6; + private static final int HOUR_SHIFT = 12; + private static final int DAY_SHIFT = 17; + private static final int MONTH_SHIFT = 22; + private static final int YEAR_SHIFT = 26; + + private static final long NANO_MASK = 0x3FFFFFFFL; + private static final long MICRO_MASK = 0xFFFFFL; + private static final long SECOND_MASK = 0x3FL; + private static final long MINUTE_MASK = 0xFC0L; + private static final long HOUR_MASK = 0x1F000L; + private static final long DAY_MASK = 0x3E0000L; + private static final long MONTH_MASK = 0x3C00000L; + private static final long YEAR_MASK = 0xFFFC000000L; + + private static final long TIME_SECONDS_MASK = 0x1FFFFL; + private static final long TIME_MICROS_MASK = 0x1FFFFFFFFFL; + private static final long TIME_NANOS_MASK = 0x7FFFFFFFFFFFL; + private static final long DATETIME_SECONDS_MASK = 0xFFFFFFFFFFL; + private static final long DATETIME_MICROS_MASK = 0xFFFFFFFFFFFFFFFL; + + /** + * Encodes {@code time} as a 4-byte integer with seconds precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *      3         2         1
+   * MSB 10987654321098765432109876543210 LSB
+   *                    | H ||  M ||  S |
+   * </pre>
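+   *
+   * <p>(Editor's note: illustrative arithmetic, not generated text.) For 15:30:45 the packed
+   * value is:
+   *
+   * <pre>
+   * (15 << 12) | (30 << 6) | 45  =  61440 | 1920 | 45  =  63405  =  0xF7AD
+   * </pre>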
+ * + * @see #decodePacked32TimeSeconds(int) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static int encodePacked32TimeSeconds(java.time.LocalTime time) { + checkValidTimeSeconds(time); + int bitFieldTimeSeconds = 0x0; + bitFieldTimeSeconds |= time.getHour() << HOUR_SHIFT; + bitFieldTimeSeconds |= time.getMinute() << MINUTE_SHIFT; + bitFieldTimeSeconds |= time.getSecond() << SECOND_SHIFT; + return bitFieldTimeSeconds; + } + + /** + * Decodes {@code bitFieldTimeSeconds} as a {@link LocalTime} with seconds precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *      3         2         1
+   * MSB 10987654321098765432109876543210 LSB
+   *                    | H ||  M ||  S |
+   * </pre>
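+   *
+   * <p>(Editor's note: illustrative arithmetic, not generated text.) Decoding inverts the
+   * packing with the masks above, e.g. for 0xF7AD:
+   *
+   * <pre>
+   * hour   = (0xF7AD & 0x1F000) >> 12  =  15
+   * minute = (0xF7AD & 0xFC0)   >> 6   =  30
+   * second =  0xF7AD & 0x3F            =  45
+   * </pre>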
+ * + * @see #encodePacked32TimeSeconds(LocalTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static java.time.LocalTime decodePacked32TimeSeconds(int bitFieldTimeSeconds) { + checkValidBitField(bitFieldTimeSeconds, TIME_SECONDS_MASK); + int hourOfDay = getFieldFromBitField(bitFieldTimeSeconds, HOUR_MASK, HOUR_SHIFT); + int minuteOfHour = getFieldFromBitField(bitFieldTimeSeconds, MINUTE_MASK, MINUTE_SHIFT); + int secondOfMinute = getFieldFromBitField(bitFieldTimeSeconds, SECOND_MASK, SECOND_SHIFT); + // LocalTime validates the input parameters. + try { + return java.time.LocalTime.of(hourOfDay, minuteOfHour, secondOfMinute); + } catch (DateTimeException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } + + /** + * This method is obsolete. Use {@link #encodePacked64TimeMicrosLocalTime(java.time.LocalTime)} + * instead. + */ + @ObsoleteApi("Use encodePacked64TimeMicrosLocalTime(java.time.LocalTime) instead") + @SuppressWarnings("GoodTime") + public static long encodePacked64TimeMicros(org.threeten.bp.LocalTime time) { + return encodePacked64TimeMicrosLocalTime(toJavaTimeLocalTime(time)); + } + + /** + * Encodes {@code time} as a 8-byte integer with microseconds precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                                | H ||  M ||  S ||-------micros-----|
+   * </pre>
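+   *
+   * <p>(Editor's note: illustrative arithmetic, not generated text.) The seconds encoding is
+   * shifted left by 20 bits and the microsecond fraction occupies the low bits, e.g. for
+   * 15:30:45.123456:
+   *
+   * <pre>
+   * (0xF7ADL << 20) | 123456  =  0xF7AD00000 | 0x1E240  =  0xF7AD1E240
+   * </pre>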
+ * + * @see #decodePacked64TimeMicros(long) + * @see #encodePacked64TimeMicros(LocalTime) + */ + @SuppressWarnings("GoodTime") + public static long encodePacked64TimeMicrosLocalTime(java.time.LocalTime time) { + checkValidTimeMicros(time); + return (((long) encodePacked32TimeSeconds(time)) << MICRO_LENGTH) | (time.getNano() / 1_000L); + } + + /** This method is obsolete. Use {@link #decodePacked64TimeMicrosLocalTime(long)} instead. */ + @ObsoleteApi("Use decodePacked64TimeMicrosLocalTime(long) instead") + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static org.threeten.bp.LocalTime decodePacked64TimeMicros(long bitFieldTimeMicros) { + return toThreetenLocalTime(decodePacked64TimeMicrosLocalTime(bitFieldTimeMicros)); + } + + /** + * Decodes {@code bitFieldTimeMicros} as a {@link java.time.LocalTime} with microseconds + * precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                                | H ||  M ||  S ||-------micros-----|
+   * </pre>
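+   *
+   * <p>(Editor's note: illustrative sketch, not generated text.) Encode and decode are exact
+   * inverses for any microsecond-truncated time:
+   *
+   * <pre>{@code
+   * java.time.LocalTime t = java.time.LocalTime.of(15, 30, 45, 123456 * 1000);
+   * long packed = CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(t);
+   * assert CivilTimeEncoder.decodePacked64TimeMicrosLocalTime(packed).equals(t);
+   * }</pre>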
+ * + * @see #encodePacked64TimeMicros(LocalTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static java.time.LocalTime decodePacked64TimeMicrosLocalTime(long bitFieldTimeMicros) { + checkValidBitField(bitFieldTimeMicros, TIME_MICROS_MASK); + int bitFieldTimeSeconds = (int) (bitFieldTimeMicros >> MICRO_LENGTH); + java.time.LocalTime timeSeconds = decodePacked32TimeSeconds(bitFieldTimeSeconds); + int microOfSecond = getFieldFromBitField(bitFieldTimeMicros, MICRO_MASK, MICRO_SHIFT); + checkValidMicroOfSecond(microOfSecond); + java.time.LocalTime time = timeSeconds.withNano(microOfSecond * 1000); + checkValidTimeMicros(time); + return time; + } + + /** + * Encodes {@code dateTime} as a 8-byte integer with seconds precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                             |--- year ---||m || D || H ||  M ||  S |
+   * </pre>
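+   *
+   * <p>(Editor's note: illustrative arithmetic, not generated text.) The date fields sit above
+   * the 17 time bits, e.g. for 2024-06-01T08:15:30:
+   *
+   * <pre>
+   * (2024L << 26) | (6L << 22) | (1L << 17) | (8 << 12) | (15 << 6) | 30
+   * </pre>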
+ * + * @see #decodePacked64DatetimeSeconds(long) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static long encodePacked64DatetimeSeconds(java.time.LocalDateTime dateTime) { + checkValidDateTimeSeconds(dateTime); + long bitFieldDatetimeSeconds = 0x0L; + bitFieldDatetimeSeconds |= (long) dateTime.getYear() << YEAR_SHIFT; + bitFieldDatetimeSeconds |= (long) dateTime.getMonthValue() << MONTH_SHIFT; + bitFieldDatetimeSeconds |= (long) dateTime.getDayOfMonth() << DAY_SHIFT; + bitFieldDatetimeSeconds |= (long) encodePacked32TimeSeconds(dateTime.toLocalTime()); + return bitFieldDatetimeSeconds; + } + + /** + * Decodes {@code bitFieldDatetimeSeconds} as a {@link LocalDateTime} with seconds precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                             |--- year ---||m || D || H ||  M ||  S |
+   * </pre>
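+   *
+   * <p>(Editor's note: illustrative arithmetic, not generated text.) Field extraction uses the
+   * masks above, e.g. year = (bits & 0xFFFC000000L) >> 26 and month = (bits & 0x3C00000L) >> 22.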
+ * + * @see #encodePacked64DatetimeSeconds(LocalDateTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static java.time.LocalDateTime decodePacked64DatetimeSeconds( + long bitFieldDatetimeSeconds) { + checkValidBitField(bitFieldDatetimeSeconds, DATETIME_SECONDS_MASK); + int bitFieldTimeSeconds = (int) (bitFieldDatetimeSeconds & TIME_SECONDS_MASK); + java.time.LocalTime timeSeconds = decodePacked32TimeSeconds(bitFieldTimeSeconds); + int year = getFieldFromBitField(bitFieldDatetimeSeconds, YEAR_MASK, YEAR_SHIFT); + int monthOfYear = getFieldFromBitField(bitFieldDatetimeSeconds, MONTH_MASK, MONTH_SHIFT); + int dayOfMonth = getFieldFromBitField(bitFieldDatetimeSeconds, DAY_MASK, DAY_SHIFT); + try { + java.time.LocalDateTime dateTime = + java.time.LocalDateTime.of( + year, + monthOfYear, + dayOfMonth, + timeSeconds.getHour(), + timeSeconds.getMinute(), + timeSeconds.getSecond()); + checkValidDateTimeSeconds(dateTime); + return dateTime; + } catch (DateTimeException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } + + /** + * This method is obsolete. Use {@link + * #encodePacked64DatetimeMicrosLocalDateTime(java.time.LocalDateTime)} instead. + */ + @ObsoleteApi("Use encodePacked64DatetimeMicrosLocalDateTime(java.time.LocalDateTime) instead") + @SuppressWarnings({"GoodTime-ApiWithNumericTimeUnit", "JavaLocalDateTimeGetNano"}) + public static long encodePacked64DatetimeMicros(org.threeten.bp.LocalDateTime dateTime) { + return encodePacked64DatetimeMicrosLocalDateTime(toJavaTimeLocalDateTime(dateTime)); + } + + /** + * Encodes {@code dateTime} as a 8-byte integer with microseconds precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *         |--- year ---||m || D || H ||  M ||  S ||-------micros-----|
+   * </pre>
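+   *
+   * <p>(Editor's note: illustrative arithmetic, not generated text.) This is the seconds
+   * encoding shifted left by 20 bits, ORed with the microsecond fraction:
+   *
+   * <pre>
+   * (encodePacked64DatetimeSeconds(dateTime) << 20) | (nanoOfSecond / 1000)
+   * </pre>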
+ * + * @see #decodePacked64DatetimeMicros(long) + */ + @SuppressWarnings({"GoodTime-ApiWithNumericTimeUnit", "JavaLocalDateTimeGetNano"}) + public static long encodePacked64DatetimeMicrosLocalDateTime(java.time.LocalDateTime dateTime) { + checkValidDateTimeMicros(dateTime); + return (encodePacked64DatetimeSeconds(dateTime) << MICRO_LENGTH) + | (dateTime.getNano() / 1_000L); + } + + /** + * This method is obsolete. Use {@link #decodePacked64DatetimeMicrosLocalDateTime(long)} instead. + */ + @ObsoleteApi("Use decodePacked64DatetimeMicrosLocalDateTime(long) instead") + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static org.threeten.bp.LocalDateTime decodePacked64DatetimeMicros( + long bitFieldDatetimeMicros) { + return toThreetenLocalDateTime( + decodePacked64DatetimeMicrosLocalDateTime(bitFieldDatetimeMicros)); + } + + /** + * Decodes {@code bitFieldDatetimeMicros} as a {@link java.time.LocalDateTime} with microseconds + * precision. + * + *

+   * <p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *         |--- year ---||m || D || H ||  M ||  S ||-------micros-----|
+   * </pre>
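+   *
+   * <p>(Editor's note: illustrative sketch, not generated text.) As with the TIME variant,
+   * encode and decode round-trip any microsecond-truncated value:
+   *
+   * <pre>{@code
+   * java.time.LocalDateTime dt = java.time.LocalDateTime.of(2024, 6, 1, 8, 15, 30, 123456000);
+   * long packed = CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime(dt);
+   * assert CivilTimeEncoder.decodePacked64DatetimeMicrosLocalDateTime(packed).equals(dt);
+   * }</pre>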
+ * + * @see #encodePacked64DatetimeMicros(LocalDateTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static java.time.LocalDateTime decodePacked64DatetimeMicrosLocalDateTime( + long bitFieldDatetimeMicros) { + checkValidBitField(bitFieldDatetimeMicros, DATETIME_MICROS_MASK); + long bitFieldDatetimeSeconds = bitFieldDatetimeMicros >> MICRO_LENGTH; + java.time.LocalDateTime dateTimeSeconds = + decodePacked64DatetimeSeconds(bitFieldDatetimeSeconds); + int microOfSecond = getFieldFromBitField(bitFieldDatetimeMicros, MICRO_MASK, MICRO_SHIFT); + checkValidMicroOfSecond(microOfSecond); + java.time.LocalDateTime dateTime = dateTimeSeconds.withNano(microOfSecond * 1_000); + checkValidDateTimeMicros(dateTime); + return dateTime; + } + + private static int getFieldFromBitField(long bitField, long mask, int shift) { + return (int) ((bitField & mask) >> shift); + } + + private static void checkValidTimeSeconds(java.time.LocalTime time) { + checkArgument(time.getHour() >= 0 && time.getHour() <= 23); + checkArgument(time.getMinute() >= 0 && time.getMinute() <= 59); + checkArgument(time.getSecond() >= 0 && time.getSecond() <= 59); + } + + private static void checkValidDateTimeSeconds(java.time.LocalDateTime dateTime) { + checkArgument(dateTime.getYear() >= 1 && dateTime.getYear() <= 9999); + checkArgument(dateTime.getMonthValue() >= 1 && dateTime.getMonthValue() <= 12); + checkArgument(dateTime.getDayOfMonth() >= 1 && dateTime.getDayOfMonth() <= 31); + checkValidTimeSeconds(dateTime.toLocalTime()); + } + + private static void checkValidTimeMicros(java.time.LocalTime time) { + checkValidTimeSeconds(time); + checkArgument(time.equals(time.truncatedTo(ChronoUnit.MICROS))); + } + + private static void checkValidDateTimeMicros(java.time.LocalDateTime dateTime) { + checkValidDateTimeSeconds(dateTime); + checkArgument(dateTime.equals(dateTime.truncatedTo(ChronoUnit.MICROS))); + } + + private static void checkValidMicroOfSecond(int microOfSecond) { + checkArgument(microOfSecond >= 0 && microOfSecond <= 999999); + } + + private static void checkValidBitField(long bitField, long mask) { + checkArgument((bitField & ~mask) == 0x0L); + } + + private CivilTimeEncoder() {} +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java new file mode 100644 index 000000000000..f7ca63e7fe54 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java @@ -0,0 +1,1648 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.core.NanoClock; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.batching.FlowController; +import com.google.api.gax.retrying.ExponentialRetryAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.auto.value.AutoValue; +import com.google.cloud.bigquery.storage.v1.AppendFormats.AppendRowsData; +import com.google.cloud.bigquery.storage.v1.AppendFormats.AppendRowsSchema; +import com.google.cloud.bigquery.storage.v1.AppendFormats.DataFormat; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.StreamConnection.DoneCallback; +import com.google.cloud.bigquery.storage.v1.StreamConnection.RequestCallback; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.protobuf.Int64Value; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.opentelemetry.api.common.Attributes; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** + * A BigQuery Stream Writer that can be used to write data into BigQuery Table. + * + *

+ * <p>TODO: Support batching.
+ *

TODO: support updated schema + */ +class ConnectionWorker implements AutoCloseable { + + private static final Logger log = Logger.getLogger(StreamWriter.class.getName()); + + // Maximum wait time on inflight quota before error out. + private static long INFLIGHT_QUOTA_MAX_WAIT_TIME_MILLI = 300000; + + /* + * Maximum time waiting for request callback before shutting down the connection. + * + * We will constantly checking how much time we have been waiting for the next request callback + * if we wait too much time we will start shutting down the connections and clean up the queues. + */ + static Duration MAXIMUM_REQUEST_CALLBACK_WAIT_TIME = Duration.ofMinutes(5); + + private Lock lock; + private Condition hasMessageInWaitingQueue; + private Condition inflightReduced; + /* + * Max retry duration when trying to establish a connection. This does not + * apply to in-stream retries. + */ + private final Duration maxRetryDuration; + private ExecutorService threadPool = Executors.newFixedThreadPool(1); + + /* + * The identifier of the current stream to write to. This stream name can change during + * multiplexing. + */ + private String streamName; + + /* + * The location of this connection. + */ + private String location = null; + + /* + * The user provided schema of rows to write. This schema can change during multiplexing. + */ + private AppendRowsSchema writerSchema; + + /* + * Max allowed inflight requests in the stream. Method append is blocked at this. + */ + private final long maxInflightRequests; + + /* + * Max allowed inflight bytes in the stream. Method append is blocked at this. + */ + private final long maxInflightBytes; + + /* + * Behavior when inflight queue is exceeded. Only supports Block or Throw, default is Block. + */ + private final FlowController.LimitExceededBehavior limitExceededBehavior; + + /* + * TraceId for debugging purpose. + */ + private final String traceId; + + /* + * Enables compression on the wire. + */ + private String compressorName = null; + + /* + * Tracks current inflight requests in the stream. + */ + @GuardedBy("lock") + private long inflightRequests = 0; + + /* + * Tracks current inflight bytes in the stream. + */ + @GuardedBy("lock") + private long inflightBytes = 0; + + /* + * Tracks how often the stream was closed due to a retriable error. Streaming will stop when the + * count hits a threshold. Streaming should only be halted, if it isn't possible to establish a + * connection. Keep track of the number of reconnections in succession. This will be reset if + * a row is successfully called back. + */ + @GuardedBy("lock") + private long conectionRetryCountWithoutCallback = 0; + + @GuardedBy("lock") + private long connectionRetryStartTime = 0; + + /* + * If false, streamConnection needs to be reset. + */ + @GuardedBy("lock") + private boolean streamConnectionIsConnected = false; + + /* + * A boolean to track if we cleaned up inflight queue. + */ + @GuardedBy("lock") + private boolean inflightCleanuped = false; + + /* + * Indicates whether user has called Close() or not. + */ + @GuardedBy("lock") + private boolean userClosed = false; + + /* + * The final status of connection. Set to nonnull when connection is permanently closed. + */ + @GuardedBy("lock") + private Throwable connectionFinalStatus = null; + + /* + * Contains requests buffered in the client and not yet sent to server. + */ + @GuardedBy("lock") + private final Deque waitingRequestQueue; + + /* + * Contains sent append requests waiting for response from server. 
+ */ + @GuardedBy("lock") + private final Deque inflightRequestQueue; + + /* + * Tracks number of destinations handled by this connection. + */ + private final Set destinationSet = ConcurrentHashMap.newKeySet(); + + /* + * Contains the updated TableSchema. + */ + @GuardedBy("lock") + private TableSchemaAndTimestamp updatedSchema; + + /* + * A client used to interact with BigQuery. + */ + private BigQueryWriteClient client; + + /* + * Wraps the underlying bi-directional stream connection with server. + */ + private StreamConnection streamConnection; + + /* + * A separate thread to handle actual communication with server. + */ + private Thread appendThread; + + /* + * The inflight wait time for the previous sent request. + */ + private final AtomicLong inflightWaitSec = new AtomicLong(0); + + /* + * A String that uniquely identifies this writer. + */ + private final String writerId = UUID.randomUUID().toString(); + + /* + * Test only exception behavior testing params. + */ + private RuntimeException testOnlyRunTimeExceptionInAppendLoop = null; + private long testOnlyAppendLoopSleepTime = 0; + + /* + * Tracks the number of responses to ignore in the case of exclusive stream retry + */ + @GuardedBy("lock") + private int responsesToIgnore = 0; + + /* + * Contains settings related to in-stream retries. If retrySettings is null, + * this implies that no retries will occur on retryable in-stream errors. + */ + private final RetrySettings retrySettings; + + private final RequestProfiler.RequestProfilerHook requestProfilerHook; + private final TelemetryMetrics telemetryMetrics; + + /** Indicate whether this connection is created during multiplexing mode. */ + private final Boolean isMultiplexing; + + private static String projectMatching = "projects/[^/]+/"; + private static Pattern streamPatternProject = Pattern.compile(projectMatching); + + static final Pattern DEFAULT_STREAM_PATTERN = + Pattern.compile("projects/([^/]+)/datasets/([^/]+)/tables/([^/]+)/(streams/)?_default$"); + + private static String tableMatching = "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)/"; + private static Pattern streamPatternTable = Pattern.compile(tableMatching); + + // Latency buckets are based on a list of 1.5 ^ n + + public static Boolean isDefaultStreamName(String streamName) { + Matcher matcher = DEFAULT_STREAM_PATTERN.matcher(streamName); + return matcher.matches(); + } + + /** The maximum size of one request. Defined by the API. */ + public static long getApiMaxRequestBytes() { + return 20L * (1L << 20); // 20 megabytes (https://en.wikipedia.org/wiki/Megabyte) + } + + static String extractProjectName(String streamName) { + Matcher streamMatcher = streamPatternProject.matcher(streamName); + if (streamMatcher.find()) { + return streamMatcher.group(); + } else { + throw new IllegalStateException( + String.format("The passed in stream name does not match standard format %s", streamName)); + } + } + + static String getRoutingHeader(String streamName, String location) { + String project = extractProjectName(streamName); + return project + "locations/" + location; + } + + private String getTableName() { + Matcher tableMatcher = streamPatternTable.matcher(this.streamName); + return tableMatcher.find() ? 
tableMatcher.group(1) : ""; + } + + public boolean hasActiveConnection() { + boolean isConnected = false; + this.lock.lock(); + try { + if (streamConnectionIsConnected) { + isConnected = true; + } + } finally { + this.lock.unlock(); + } + return isConnected; + } + + public int getInflightRequestQueueLength() { + int length = 0; + this.lock.lock(); + try { + length = inflightRequestQueue.size(); + } finally { + this.lock.unlock(); + } + return length; + } + + @VisibleForTesting + Attributes getTelemetryAttributes() { + return telemetryMetrics.getTelemetryAttributes(); + } + + public ConnectionWorker( + String streamName, + String location, + ProtoSchema writerSchema, + long maxInflightRequests, + long maxInflightBytes, + Duration maxRetryDuration, + FlowController.LimitExceededBehavior limitExceededBehavior, + String traceId, + @Nullable String compressorName, + BigQueryWriteSettings clientSettings, + RetrySettings retrySettings, + boolean enableRequestProfiler, + boolean enableOpenTelemetry, + boolean isMultiplexing) + throws IOException { + this( + streamName, + location, + AppendRowsSchema.of(writerSchema), + maxInflightRequests, + maxInflightBytes, + maxRetryDuration, + limitExceededBehavior, + traceId, + compressorName, + clientSettings, + retrySettings, + enableRequestProfiler, + enableOpenTelemetry, + isMultiplexing); + } + + public ConnectionWorker( + String streamName, + String location, + ArrowSchema writerSchema, + long maxInflightRequests, + long maxInflightBytes, + Duration maxRetryDuration, + FlowController.LimitExceededBehavior limitExceededBehavior, + String traceId, + @Nullable String compressorName, + BigQueryWriteSettings clientSettings, + RetrySettings retrySettings, + boolean enableRequestProfiler, + boolean enableOpenTelemetry, + boolean isMultiplexing) + throws IOException { + this( + streamName, + location, + AppendRowsSchema.of(writerSchema), + maxInflightRequests, + maxInflightBytes, + maxRetryDuration, + limitExceededBehavior, + traceId, + compressorName, + clientSettings, + retrySettings, + enableRequestProfiler, + enableOpenTelemetry, + isMultiplexing); + } + + ConnectionWorker( + String streamName, + String location, + AppendRowsSchema writerSchema, + long maxInflightRequests, + long maxInflightBytes, + Duration maxRetryDuration, + FlowController.LimitExceededBehavior limitExceededBehavior, + String traceId, + @Nullable String compressorName, + BigQueryWriteSettings clientSettings, + RetrySettings retrySettings, + boolean enableRequestProfiler, + boolean enableOpenTelemetry, + boolean isMultiplexing) + throws IOException { + this.lock = new ReentrantLock(); + this.hasMessageInWaitingQueue = lock.newCondition(); + this.inflightReduced = lock.newCondition(); + this.streamName = streamName; + if (location != null && !location.isEmpty()) { + this.location = location; + } + this.maxRetryDuration = maxRetryDuration != null ? 
maxRetryDuration : Duration.ofMinutes(5); + if (writerSchema == null) { + throw new StatusRuntimeException( + Status.fromCode(Code.INVALID_ARGUMENT) + .withDescription("Writer schema must be provided when building this writer.")); + } + this.maxInflightRequests = maxInflightRequests; + this.maxInflightBytes = maxInflightBytes; + this.limitExceededBehavior = limitExceededBehavior; + this.traceId = traceId; + this.waitingRequestQueue = new LinkedList(); + this.inflightRequestQueue = new LinkedList(); + this.compressorName = compressorName; + this.retrySettings = retrySettings; + this.requestProfilerHook = new RequestProfiler.RequestProfilerHook(enableRequestProfiler); + this.telemetryMetrics = + new TelemetryMetrics(this, enableOpenTelemetry, getTableName(), writerId, traceId); + this.isMultiplexing = isMultiplexing; + + // Always recreate a client for connection worker. + HashMap newHeaders = new HashMap<>(); + newHeaders.putAll(clientSettings.toBuilder().getHeaderProvider().getHeaders()); + if (this.location == null) { + newHeaders.put("x-goog-request-params", "write_stream=" + this.streamName); + } else { + newHeaders.put( + "x-goog-request-params", + "write_location=" + getRoutingHeader(this.streamName, this.location)); + } + BigQueryWriteSettings stubSettings = + clientSettings.toBuilder() + .setHeaderProvider(FixedHeaderProvider.create(newHeaders)) + .build(); + this.client = BigQueryWriteClient.create(clientSettings); + + this.appendThread = + new Thread( + new Runnable() { + @Override + public void run() { + appendLoop(); + } + }); + appendThread.setUncaughtExceptionHandler( + (Thread t, Throwable e) -> { + log.warning( + "Exception thrown from append loop, thus stream writer is shutdown due to exception: " + + e.toString()); + lock.lock(); + try { + connectionFinalStatus = e; + // Move all current waiting requests to in flight queue. + while (!this.waitingRequestQueue.isEmpty()) { + AppendRequestAndResponse requestWrapper = this.waitingRequestQueue.pollFirst(); + this.inflightRequestQueue.addLast(requestWrapper); + } + } finally { + lock.unlock(); + } + cleanupConnectionAndRequests( + /* avoidBlocking= */ true); // don't perform blocking operations while on user thread + }); + this.appendThread.start(); + } + + private void resetConnection() { + log.info("Start connecting stream: " + streamName + " id: " + writerId); + telemetryMetrics.recordConnectionStart(); + if (this.streamConnection != null) { + // It's safe to directly close the previous connection as the in flight messages + // will be picked up by the next connection. 
+ this.streamConnection.close(); + Uninterruptibles.sleepUninterruptibly( + calculateSleepTimeMilli(conectionRetryCountWithoutCallback), TimeUnit.MILLISECONDS); + } + this.streamConnection = + new StreamConnection( + this.client, + new RequestCallback() { + @Override + public void run(AppendRowsResponse response) { + requestCallback(response); + } + }, + new DoneCallback() { + @Override + public void run(Throwable finalStatus) { + doneCallback(finalStatus); + } + }, + this.compressorName); + log.info("Finish connecting stream: " + streamName + " id: " + writerId); + } + + @GuardedBy("lock") + private boolean shouldWaitForBackoff(AppendRequestAndResponse requestWrapper) { + if (this.retrySettings != null + && Instant.now().isBefore(requestWrapper.blockMessageSendDeadline)) { + log.fine( + String.format( + "Waiting for wait queue to unblock at %s for retry # %s", + requestWrapper.blockMessageSendDeadline, requestWrapper.retryCount)); + return true; + } + + return false; + } + + private void waitForBackoffIfNecessary(AppendRequestAndResponse requestWrapper) { + lock.lock(); + requestProfilerHook.startOperation( + RequestProfiler.OperationName.RETRY_BACKOFF, requestWrapper.requestUniqueId); + try { + Condition condition = lock.newCondition(); + while (shouldWaitForBackoff(requestWrapper)) { + condition.await(100, java.util.concurrent.TimeUnit.MILLISECONDS); + } + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } finally { + requestProfilerHook.endOperation( + RequestProfiler.OperationName.RETRY_BACKOFF, requestWrapper.requestUniqueId); + lock.unlock(); + } + } + + @GuardedBy("lock") + private void addMessageToFrontOfWaitingQueue(AppendRequestAndResponse requestWrapper) { + addMessageToWaitingQueue(requestWrapper, /* addToFront= */ true); + } + + @GuardedBy("lock") + private void addMessageToBackOfWaitingQueue(AppendRequestAndResponse requestWrapper) { + addMessageToWaitingQueue(requestWrapper, /* addToFront= */ false); + } + + @GuardedBy("lock") + private void addMessageToWaitingQueue( + AppendRequestAndResponse requestWrapper, boolean addToFront) { + ++this.inflightRequests; + this.inflightBytes += requestWrapper.messageSize; + hasMessageInWaitingQueue.signal(); + requestProfilerHook.startOperation( + RequestProfiler.OperationName.WAIT_QUEUE, requestWrapper.requestUniqueId); + if (addToFront) { + waitingRequestQueue.addFirst(requestWrapper); + } else { + waitingRequestQueue.add(requestWrapper); + } + } + + /** Schedules the writing of rows at given offset. */ + ApiFuture append( + StreamWriter streamWriter, AppendRowsData rows, long offset, String requestUniqueId) { + Preconditions.checkArgument( + rows.format() == streamWriter.getWriterSchema().format(), + "The appended data format must be compatible with the StreamWriter's schema."); + if (this.location != null && !this.location.equals(streamWriter.getLocation())) { + throw new StatusRuntimeException( + Status.fromCode(Code.INVALID_ARGUMENT) + .withDescription( + "StreamWriter with location " + + streamWriter.getLocation() + + " is scheduled to use a connection with location " + + this.location)); + } else if (this.location == null && !streamWriter.getStreamName().equals(this.streamName)) { + // Location is null implies this is non-multiplexed connection. 
+ throw new StatusRuntimeException( + Status.fromCode(Code.INVALID_ARGUMENT) + .withDescription( + "StreamWriter with stream name " + + streamWriter.getStreamName() + + " is scheduled to use a connection with stream name " + + this.streamName)); + } + Preconditions.checkNotNull(streamWriter); + AppendRowsRequest.Builder requestBuilder = AppendRowsRequest.newBuilder(); + Preconditions.checkArgument(rows.format() == streamWriter.getWriterSchema().format()); + if (rows.format() == DataFormat.PROTO) { + requestBuilder.setProtoRows( + ProtoData.newBuilder() + .setWriterSchema(streamWriter.getProtoSchema()) + .setRows(rows.protoRows()) + .build()); + } else if (rows.format() == DataFormat.ARROW) { + requestBuilder.setArrowRows( + ArrowData.newBuilder() + .setWriterSchema(streamWriter.getArrowSchema()) + .setRows(rows.arrowRecordBatch()) + .build()); + } else { + throw new IllegalArgumentException( + "Unsupported data format: " + rows.format() + " for stream: " + streamName); + } + if (offset >= 0) { + requestBuilder.setOffset(Int64Value.of(offset)); + } + requestBuilder.setWriteStream(streamWriter.getStreamName()); + requestBuilder.putAllMissingValueInterpretations( + streamWriter.getMissingValueInterpretationMap()); + if (streamWriter.getDefaultValueInterpretation() + != MissingValueInterpretation.MISSING_VALUE_INTERPRETATION_UNSPECIFIED) { + requestBuilder.setDefaultMissingValueInterpretation( + streamWriter.getDefaultValueInterpretation()); + } + return appendInternal( + streamWriter, requestBuilder.build(), requestUniqueId, rows.recordBatchRowCount()); + } + + Boolean isUserClosed() { + this.lock.lock(); + try { + return userClosed; + } finally { + this.lock.unlock(); + } + } + + String getWriteLocation() { + return this.location; + } + + private ApiFuture appendInternal( + StreamWriter streamWriter, + AppendRowsRequest message, + String requestUniqueId, + long recordBatchRowCount) { + AppendRequestAndResponse requestWrapper = + new AppendRequestAndResponse( + message, streamWriter, this.retrySettings, requestUniqueId, recordBatchRowCount); + if (requestWrapper.messageSize > getApiMaxRequestBytes()) { + requestWrapper.appendResult.setException( + new StatusRuntimeException( + Status.fromCode(Code.INVALID_ARGUMENT) + .withDescription( + "MessageSize is too large. Max allow: " + + getApiMaxRequestBytes() + + " Actual: " + + requestWrapper.messageSize))); + return requestWrapper.appendResult; + } + this.lock.lock(); + try { + if (userClosed) { + requestWrapper.appendResult.setException( + new Exceptions.StreamWriterClosedException( + Status.fromCode(Status.Code.FAILED_PRECONDITION) + .withDescription("Connection is already closed during append"), + streamName, + writerId)); + return requestWrapper.appendResult; + } + // Check if queue is going to be full before adding the request. + if (this.limitExceededBehavior == FlowController.LimitExceededBehavior.ThrowException) { + if (this.inflightRequests + 1 >= this.maxInflightRequests) { + throw new Exceptions.InflightRequestsLimitExceededException( + writerId, this.maxInflightRequests); + } + if (this.inflightBytes + requestWrapper.messageSize >= this.maxInflightBytes) { + throw new Exceptions.InflightBytesLimitExceededException(writerId, this.maxInflightBytes); + } + } + + if (connectionFinalStatus != null) { + String connectionFinalStatusString; + if (connectionFinalStatus + .toString() + .contains("com.google.api.gax.rpc.UnavailableException")) { + connectionFinalStatusString = + connectionFinalStatus.toString() + + ". 
This is a most likely a transient condition and may be corrected by retrying" + + " with a backoff."; + } else { + connectionFinalStatusString = connectionFinalStatus.toString(); + } + requestWrapper.appendResult.setException( + new Exceptions.StreamWriterClosedException( + Status.fromCode(Status.Code.FAILED_PRECONDITION) + .withDescription("Connection is closed due to " + connectionFinalStatusString), + streamName, + writerId)); + return requestWrapper.appendResult; + } + requestProfilerHook.startOperation(RequestProfiler.OperationName.WAIT_QUEUE, requestUniqueId); + ++this.inflightRequests; + this.inflightBytes += requestWrapper.messageSize; + waitingRequestQueue.addLast(requestWrapper); + hasMessageInWaitingQueue.signal(); + requestProfilerHook.startOperation( + RequestProfiler.OperationName.WAIT_INFLIGHT_QUOTA, requestUniqueId); + try { + maybeWaitForInflightQuota(); + } catch (StatusRuntimeException ex) { + --this.inflightRequests; + waitingRequestQueue.pollLast(); + this.inflightBytes -= requestWrapper.messageSize; + throw ex; + } + requestProfilerHook.endOperation( + RequestProfiler.OperationName.WAIT_INFLIGHT_QUOTA, requestUniqueId); + return requestWrapper.appendResult; + } finally { + this.lock.unlock(); + } + } + + @GuardedBy("lock") + private void maybeWaitForInflightQuota() { + long start_time = System.currentTimeMillis(); + while (this.inflightRequests >= this.maxInflightRequests + || this.inflightBytes >= this.maxInflightBytes) { + try { + inflightReduced.await(100, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + log.warning( + "Interrupted while waiting for inflight quota. Stream: " + + streamName + + " Error: " + + e.toString()); + throw new StatusRuntimeException( + Status.fromCode(Code.CANCELLED) + .withCause(e) + .withDescription("Interrupted while waiting for quota.")); + } + long current_wait_time = System.currentTimeMillis() - start_time; + if (current_wait_time > INFLIGHT_QUOTA_MAX_WAIT_TIME_MILLI) { + throw new StatusRuntimeException( + Status.fromCode(Code.CANCELLED) + .withDescription( + String.format( + "Interrupted while waiting for quota due to long waiting time %sms", + current_wait_time))); + } + } + inflightWaitSec.set((System.currentTimeMillis() - start_time) / 1000); + } + + @VisibleForTesting + static long calculateSleepTimeMilli(long retryCount) { + return (long) Math.min(Math.pow(2, retryCount) * 50, 60000); + } + + @VisibleForTesting + void setTestOnlyAppendLoopSleepTime(long testOnlyAppendLoopSleepTime) { + this.testOnlyAppendLoopSleepTime = testOnlyAppendLoopSleepTime; + } + + @VisibleForTesting + void setTestOnlyRunTimeExceptionInAppendLoop( + RuntimeException testOnlyRunTimeExceptionInAppendLoop) { + this.testOnlyRunTimeExceptionInAppendLoop = testOnlyRunTimeExceptionInAppendLoop; + } + + public long getInflightWaitSeconds() { + return inflightWaitSec.longValue(); + } + + /** + * @return a unique Id for the writer. + */ + public String getWriterId() { + return writerId; + } + + boolean isConnectionInUnrecoverableState() { + this.lock.lock(); + try { + // If final status is set, there's no + return connectionFinalStatus != null; + } finally { + this.lock.unlock(); + } + } + + /** Close the stream writer. Shut down all resources. */ + @Override + public void close() { + log.info("User closing stream: " + streamName); + this.lock.lock(); + try { + this.userClosed = true; + } finally { + this.lock.unlock(); + } + log.info("Waiting for append thread to finish. 
Stream: " + streamName + " id: " + writerId); + try { + appendThread.join(); + } catch (InterruptedException e) { + // Unexpected. Just swallow the exception with logging. + log.warning( + "Append handler join is interrupted. Stream: " + + streamName + + " id: " + + writerId + + " Error: " + + e.toString()); + } + this.client.close(); + try { + // Backend request has a 2 minute timeout, so wait a little longer than that. + this.client.awaitTermination(150, TimeUnit.SECONDS); + } catch (InterruptedException ignored) { + log.warning("Client await termination timeout in writer id " + writerId); + } + + try { + log.fine( + "Begin shutting down user callback thread pool for stream " + + streamName + + " id: " + + writerId); + threadPool.shutdown(); + threadPool.awaitTermination(3, TimeUnit.MINUTES); + } catch (InterruptedException e) { + // Unexpected. Just swallow the exception with logging. + log.warning( + "Close on thread pool for " + + streamName + + " id: " + + writerId + + " is interrupted with exception: " + + e.toString()); + throw new IllegalStateException( + "Thread pool shutdown is interrupted for stream " + streamName); + } + log.info("User close finishes for stream " + streamName); + } + + /* + * This loop is executed in a separate thread. + * + * It takes requests from waiting queue and sends them to server. + */ + private void appendLoop() { + Deque localQueue = new LinkedList(); + boolean streamNeedsConnecting = false; + + // Indicate whether we are at the first request after switching destination. + // True means the schema and other metadata are needed. + boolean firstRequestForTableOrSchemaSwitch = true; + + while (!waitingQueueDrained()) { + this.lock.lock(); + try { + hasMessageInWaitingQueue.await(100, TimeUnit.MILLISECONDS); + // Check whether we should error out the current append loop. + if (inflightRequestQueue.size() > 0) { + Instant sendInstant = inflightRequestQueue.getFirst().requestSendTimeStamp; + if (sendInstant != null) { + throwIfWaitCallbackTooLong(sendInstant); + } + } + + // Copy the streamConnectionIsConnected guarded by lock to a local variable. + // In addition, only reconnect if there is a retriable error. + streamNeedsConnecting = !streamConnectionIsConnected && connectionFinalStatus == null; + if (streamNeedsConnecting) { + // If the stream connection is broken, any requests on inflightRequestQueue will need + // to be resent, as the new connection has no knowledge of the requests. Copy the requests + // from inflightRequestQueue and prepent them onto the waitinRequestQueue. They need to be + // prepended as they need to be sent before new requests. + while (!inflightRequestQueue.isEmpty()) { + AppendRequestAndResponse requestWrapper = inflightRequestQueue.pollLast(); + // Consider the backend latency as completed for the current request. + requestProfilerHook.endOperation( + RequestProfiler.OperationName.RESPONSE_LATENCY, requestWrapper.requestUniqueId); + requestWrapper.requestSendTimeStamp = null; + requestProfilerHook.startOperation( + RequestProfiler.OperationName.WAIT_QUEUE, requestWrapper.requestUniqueId); + waitingRequestQueue.addFirst(requestWrapper); + } + + // If any of the inflight messages were meant to be ignored during requestCallback, they + // no longer will be able to make the round trip, so clear responsesToIgnore. 
+ this.responsesToIgnore = 0; + } + while (!this.waitingRequestQueue.isEmpty()) { + AppendRequestAndResponse requestWrapper = this.waitingRequestQueue.pollFirst(); + requestProfilerHook.endOperation( + RequestProfiler.OperationName.WAIT_QUEUE, requestWrapper.requestUniqueId); + waitForBackoffIfNecessary(requestWrapper); + this.inflightRequestQueue.add(requestWrapper); + localQueue.addLast(requestWrapper); + } + } catch (InterruptedException e) { + log.warning( + "Interrupted while waiting for message. Stream: " + + streamName + + " id: " + + writerId + + " Error: " + + e.toString()); + } finally { + this.lock.unlock(); + } + + if (localQueue.isEmpty()) { + continue; + } + if (streamNeedsConnecting) { + // Set streamConnectionIsConnected to true, to indicate the stream has been connected. This + // should happen before the call to resetConnection. As it is unknown when the connection + // could be closed and the doneCallback called, and thus clearing the flag. + lock.lock(); + try { + this.streamConnectionIsConnected = true; + } finally { + lock.unlock(); + } + if (testOnlyRunTimeExceptionInAppendLoop != null) { + Uninterruptibles.sleepUninterruptibly(testOnlyAppendLoopSleepTime, TimeUnit.MILLISECONDS); + throw testOnlyRunTimeExceptionInAppendLoop; + } + resetConnection(); + // Set firstRequestInConnection to indicate the next request to be sent should include + // metedata. Reset everytime after reconnection. + firstRequestForTableOrSchemaSwitch = true; + } + while (!localQueue.isEmpty()) { + localQueue.peekFirst().setRequestSendQueueTime(); + AppendRequestAndResponse wrapper = localQueue.pollFirst(); + AppendRowsRequest originalRequest = wrapper.message; + String requestUniqueId = wrapper.requestUniqueId; + AppendRowsRequest.Builder originalRequestBuilder = originalRequest.toBuilder(); + // incomingWriterSchema is null if the request doesn't have a schema. + AppendRowsSchema incomingWriterSchema = getSchema(originalRequest); + // Always respect the first writer schema seen by the loop. + if (writerSchema == null && incomingWriterSchema != null) { + writerSchema = incomingWriterSchema; + } + // Consider we enter multiplexing if we met a different non empty stream name or we meet + // a new schema for the same stream name. + // For the schema comparision we don't use message differencer to speed up the comparing + // process. `equals(...)` can bring us false positive, e.g. two repeated field can be + // considered the same but is not considered equals(). However as long as it's never provide + // false negative we will always correctly pass writer schema to backend. + if ((!originalRequest.getWriteStream().isEmpty() + && !streamName.isEmpty() + && !originalRequest.getWriteStream().equals(streamName)) + || (incomingWriterSchema != null && !incomingWriterSchema.equals(writerSchema))) { + streamName = originalRequest.getWriteStream(); + telemetryMetrics.refreshOpenTelemetryTableNameAttributes(getTableName()); + writerSchema = incomingWriterSchema; + firstRequestForTableOrSchemaSwitch = true; + } + + if (firstRequestForTableOrSchemaSwitch) { + // If we are at the first request for every table switch, including the first request in + // the connection, we will attach both stream name and table schema to the request. + destinationSet.add(streamName); + originalRequestBuilder.setTraceId(wrapper.streamWriter.getFullTraceId()); + } else if (!isMultiplexing) { + // If we are not in multiplexing and not in the first request, clear the stream name. 
+ originalRequestBuilder.clearWriteStream(); + } + + // During non table/schema switch requests, clear writer schema. + if (!firstRequestForTableOrSchemaSwitch) { + if (originalRequest.hasProtoRows()) { + originalRequestBuilder.getProtoRowsBuilder().clearWriterSchema(); + } else if (originalRequest.hasArrowRows()) { + originalRequestBuilder.getArrowRowsBuilder().clearWriterSchema(); + } else { + throw new IllegalStateException("Unsupported data format in the AppendRowsRequest."); + } + } + firstRequestForTableOrSchemaSwitch = false; + + requestProfilerHook.startOperation( + RequestProfiler.OperationName.RESPONSE_LATENCY, requestUniqueId); + + // Send should only throw an exception if there is a problem with the request. The catch + // block will handle this case, and return the exception with the result. + // Otherwise send will return: + // SUCCESS: Message was sent, wait for the callback. + // STREAM_CLOSED: Stream was closed, normally or due to en error + // NOT_ENOUGH_QUOTA: Message wasn't sent due to not enough quota. + // TODO: Handle NOT_ENOUGH_QUOTA. + // In the close case, the request is in the inflight queue, and will either be returned + // to the user with an error, or will be resent. + this.streamConnection.send(originalRequestBuilder.build()); + } + } + cleanupConnectionAndRequests(/* avoidBlocking= */ false); + } + + @Nullable + private AppendRowsSchema getSchema(AppendRowsRequest request) { + if (request.hasProtoRows()) { + return request.getProtoRows().hasWriterSchema() + ? AppendRowsSchema.of(request.getProtoRows().getWriterSchema()) + : null; + } else if (request.hasArrowRows()) { + return request.getArrowRows().hasWriterSchema() + ? AppendRowsSchema.of(request.getArrowRows().getWriterSchema()) + : null; + } else { + throw new IllegalStateException("Unsupported data format in the AppendRowsRequest."); + } + } + + private void cleanupConnectionAndRequests(boolean avoidBlocking) { + log.info( + "Cleanup starts. Stream: " + + streamName + + " id: " + + writerId + + " userClose: " + + userClosed + + " final exception: " + + (this.connectionFinalStatus == null + ? "null" + : this.connectionFinalStatus.toString())); + // At this point, the waiting queue is drained, so no more requests. + // We can close the stream connection and handle the remaining inflight requests. + if (streamConnection != null) { + this.streamConnection.close(); + if (!avoidBlocking) { + waitForDoneCallback(3, TimeUnit.MINUTES); + } + } + + // At this point, there cannot be more callback. It is safe to clean up all inflight requests. + log.info( + "Stream connection is fully closed. Cleaning up inflight requests. Stream: " + + streamName + + " id: " + + writerId); + cleanupInflightRequests(); + log.info("Append thread is done. Stream: " + streamName + " id: " + writerId); + } + + private void throwIfWaitCallbackTooLong(Instant timeToCheck) { + Duration milliSinceLastCallback = Duration.between(timeToCheck, Instant.now()); + if (milliSinceLastCallback.compareTo(MAXIMUM_REQUEST_CALLBACK_WAIT_TIME) > 0) { + throw new Exceptions.MaximumRequestCallbackWaitTimeExceededException( + milliSinceLastCallback, writerId, MAXIMUM_REQUEST_CALLBACK_WAIT_TIME); + } + } + + /* + * Returns true if waiting queue is drain, a.k.a. no more requests in the waiting queue. + * + * It serves as a signal to append thread that there cannot be any more requests in the waiting + * queue and it can prepare to stop. 
+ */ + private boolean waitingQueueDrained() { + this.lock.lock(); + try { + return (this.userClosed || this.connectionFinalStatus != null) + && this.waitingRequestQueue.isEmpty(); + } finally { + this.lock.unlock(); + } + } + + private void waitForDoneCallback(long duration, TimeUnit timeUnit) { + log.fine( + "Waiting for done callback from stream connection. Stream: " + + streamName + + " id: " + + writerId); + long deadline = System.nanoTime() + timeUnit.toNanos(duration); + while (System.nanoTime() <= deadline) { + this.lock.lock(); + try { + if (!this.streamConnectionIsConnected) { + // Done callback is received, return. + return; + } + } finally { + this.lock.unlock(); + } + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + } + this.lock.lock(); + try { + log.warning("Donecallback is not triggered within timeout frame for writer " + writerId); + if (connectionFinalStatus == null) { + connectionFinalStatus = + new StatusRuntimeException( + Status.fromCode(Code.CANCELLED) + .withDescription("Timeout waiting for DoneCallback.")); + } + } finally { + this.lock.unlock(); + } + + return; + } + + private void cleanupInflightRequests() { + Throwable finalStatus = + new Exceptions.StreamWriterClosedException( + Status.fromCode(Status.Code.FAILED_PRECONDITION) + .withDescription("Connection is already closed, cleanup inflight request"), + streamName, + writerId); + Deque localQueue = new LinkedList(); + this.lock.lock(); + try { + if (this.connectionFinalStatus != null) { + finalStatus = this.connectionFinalStatus; + } + while (!this.inflightRequestQueue.isEmpty()) { + localQueue.addLast(pollFirstInflightRequestQueue()); + } + this.inflightCleanuped = true; + } finally { + this.lock.unlock(); + } + log.fine( + "Cleaning " + + localQueue.size() + + " inflight requests with error: " + + finalStatus + + " for Stream " + + streamName + + " id: " + + writerId); + int sizeOfQueue = localQueue.size(); + for (int i = 0; i < sizeOfQueue; i++) { + if (i == 0) { + localQueue.pollFirst().appendResult.setException(finalStatus); + } else { + localQueue + .pollFirst() + .appendResult + .setException( + new Exceptions.StreamWriterClosedException( + Status.fromCode(Code.ABORTED) + .withDescription( + "Connection is aborted due to an unrecoverable failure of " + + "another request sharing the connection. Please retry this " + + "request."), + streamName, + writerId)); + } + } + } + + private Boolean retryOnRetryableError(Code errorCode, AppendRequestAndResponse requestWrapper) { + if (this.retrySettings == null) { + return false; + } + + if (this.retrySettings.getMaxAttempts() == 0) { + return false; + } + + if (!isConnectionErrorRetriable(errorCode) && errorCode != Code.RESOURCE_EXHAUSTED) { + return false; + } + + if (requestWrapper.retryCount < this.retrySettings.getMaxAttempts()) { + lock.lock(); + try { + requestWrapper.retryCount++; + if (this.retrySettings != null && useBackoffForError(errorCode, streamName)) { + // Trigger exponential backoff in append loop when request is resent for quota errors. + // createNextAttempt correctly initializes the retry delay; createfirstAttempt does not + // include a positive delay, just 0. + requestWrapper.attemptSettings = + requestWrapper.retryAlgorithm.createNextAttempt( + requestWrapper.attemptSettings == null + ? 
+                      requestWrapper.retryAlgorithm.createFirstAttempt()
+                      : requestWrapper.attemptSettings);
+          requestWrapper.blockMessageSendDeadline =
+              Instant.now().plusMillis(requestWrapper.attemptSettings.getRetryDelay().toMillis());
+          log.info(
+              "Messages blocked for retry for "
+                  + java.time.Duration.between(
+                      java.time.Instant.now(), requestWrapper.blockMessageSendDeadline)
+                  + " until "
+                  + requestWrapper.blockMessageSendDeadline);
+        }
+
+        Long offset =
+            requestWrapper.message.hasOffset() ? requestWrapper.message.getOffset().getValue() : -1;
+        if (isDefaultStreamName(streamName) || offset == -1) {
+          log.info(
+              String.format(
+                  "Retrying default stream message in stream %s for in-stream error: %s, retry"
+                      + " count: %s",
+                  streamName, errorCode, requestWrapper.retryCount));
+          addMessageToFrontOfWaitingQueue(requestWrapper);
+        } else {
+          log.info(
+              String.format(
+                  "Retrying exclusive message in stream %s at offset %d for in-stream error: %s,"
+                      + " retry count: %s",
+                  streamName,
+                  requestWrapper.message.getOffset().getValue(),
+                  errorCode,
+                  requestWrapper.retryCount));
+          // Send all inflight messages to the front of the waiting queue.
+          while (!inflightRequestQueue.isEmpty()) {
+            AppendRequestAndResponse element = pollLastInflightRequestQueue();
+            addMessageToFrontOfWaitingQueue(element);
+            responsesToIgnore++;
+          }
+
+          addMessageToFrontOfWaitingQueue(requestWrapper);
+        }
+        return true;
+      } finally {
+        lock.unlock();
+      }
+    }
+
+    log.info(
+        String.format(
+            "Max retry count reached for message in stream %s at offset %d. Retry count: %d",
+            streamName, requestWrapper.message.getOffset().getValue(), requestWrapper.retryCount));
+    return false;
+  }
+
+  private void requestCallback(AppendRowsResponse response) {
+    if (response.hasUpdatedSchema()) {
+      AppendRowsResponse responseWithUpdatedSchemaRemoved =
+          response.toBuilder().clearUpdatedSchema().build();
+
+      log.fine(
+          String.format(
+              "Got response with schema updated (omitting updated schema in response here): %s"
+                  + " writer id %s",
+              responseWithUpdatedSchemaRemoved.toString(), writerId));
+    }
+
+    AppendRequestAndResponse requestWrapper;
+    this.lock.lock();
+    try {
+      // An ignored response has arrived.
+      if (responsesToIgnore > 0) {
+        if (response.hasError()) {
+          log.fine(
+              String.format("Ignoring error response in stream %s: %s.", streamName, response));
+        } else {
+          log.warning(
+              String.format(
+                  "Unexpected successful response in stream %s at offset %s. Due to a previous"
+                      + " retryable error being inflight, this message is being ignored.",
+                  streamName, response.getAppendResult().getOffset()));
+        }
+
+        responsesToIgnore--;
+        return;
+      }
+
+      if (response.hasUpdatedSchema()) {
+        this.updatedSchema =
+            TableSchemaAndTimestamp.create(System.nanoTime(), response.getUpdatedSchema());
+      }
+      // Had a successful connection with at least one result, reset retries.
+      // conectionRetryCountWithoutCallback is reset so that only multiple retries without
+      // successfully sent records will cause the stream to fail.
+      if (conectionRetryCountWithoutCallback != 0) {
+        conectionRetryCountWithoutCallback = 0;
+      }
+      if (connectionRetryStartTime != 0) {
+        connectionRetryStartTime = 0;
+      }
+      if (!this.inflightRequestQueue.isEmpty()) {
+        Instant sendInstant = inflightRequestQueue.getFirst().requestSendTimeStamp;
+        if (sendInstant != null) {
+          Duration durationLatency = Duration.between(sendInstant, Instant.now());
+          telemetryMetrics.recordNetworkLatency(durationLatency);
+        }
+
+        requestWrapper = pollFirstInflightRequestQueue();
+        requestProfilerHook.endOperation(
+            RequestProfiler.OperationName.RESPONSE_LATENCY, requestWrapper.requestUniqueId);
+      } else if (inflightCleanuped) {
+        // It is possible that when requestCallback is called, the inflight queue is already
+        // drained because we timed out waiting for done.
+        return;
+      } else {
+        // This is unexpected: the inflight queue should not be empty at this point.
+        log.log(Level.WARNING, "Unexpected: request callback called on an empty inflight queue.");
+        connectionFinalStatus =
+            new StatusRuntimeException(
+                Status.fromCode(Code.FAILED_PRECONDITION)
+                    .withDescription("Request callback called on an empty inflight queue."));
+        return;
+      }
+    } finally {
+      this.lock.unlock();
+    }
+
+    long rowCount =
+        requestWrapper.message.hasProtoRows()
+            ? requestWrapper.message.getProtoRows().getRows().getSerializedRowsCount()
+            : requestWrapper.recordBatchRowCount == -1 ? 0L : requestWrapper.recordBatchRowCount;
+
+    telemetryMetrics.recordResponse(
+        requestWrapper.messageSize,
+        rowCount,
+        Code.values()[
+                response.hasError() ? response.getError().getCode() : Status.Code.OK.ordinal()]
+            .toString(),
+        requestWrapper.retryCount > 0);
+
+    // Retries need to happen on the same thread, because queue locking may occur.
+    if (response.hasError()) {
+      if (retryOnRetryableError(Code.values()[response.getError().getCode()], requestWrapper)) {
+        log.info("Attempting to retry on error: " + response.getError().toString());
+        return;
+      }
+    }
+
+    // We need a separate thread pool to unblock the next request callback.
+    // Otherwise the user may call append inside the request callback, which may block waiting
+    // on inflight quota, causing deadlock as requests can't be popped out of the queue until
+    // the current request callback finishes.
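+    // A concrete (hypothetical) deadlock chain the hand-off avoids: requestCallback runs user
+    // code -> user code calls append() -> append() blocks on inflight quota -> quota is only
+    // released when this callback returns. Submitting to threadPool breaks the cycle.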
+    threadPool.submit(
+        () -> {
+          try {
+            if (response.hasError()) {
+              Exceptions.StorageException storageException =
+                  Exceptions.toStorageException(response.getError(), null);
+              log.fine(String.format("Got error message: %s", response.toString()));
+              if (storageException != null) {
+                requestWrapper.appendResult.setException(storageException);
+              } else if (response.getRowErrorsCount() > 0) {
+                Map<Integer, String> rowIndexToErrorMessage = new HashMap<>();
+                for (int i = 0; i < response.getRowErrorsCount(); i++) {
+                  RowError rowError = response.getRowErrors(i);
+                  rowIndexToErrorMessage.put(
+                      Math.toIntExact(rowError.getIndex()), rowError.getMessage());
+                }
+                AppendSerializationError exception =
+                    new AppendSerializationError(
+                        response.getError().getCode(),
+                        response.getError().getMessage(),
+                        streamName,
+                        rowIndexToErrorMessage);
+                requestWrapper.appendResult.setException(exception);
+              } else {
+                StatusRuntimeException exception =
+                    new StatusRuntimeException(
+                        Status.fromCodeValue(response.getError().getCode())
+                            .withDescription(response.getError().getMessage()));
+                requestWrapper.appendResult.setException(exception);
+              }
+            } else {
+              requestWrapper.appendResult.set(response);
+            }
+          } finally {
+            requestProfilerHook.endOperation(
+                RequestProfiler.OperationName.TOTAL_LATENCY, requestWrapper.requestUniqueId);
+          }
+        });
+  }
+
+  private boolean isConnectionErrorRetriable(Code statusCode) {
+    return statusCode == Code.ABORTED
+        || statusCode == Code.UNAVAILABLE
+        || statusCode == Code.CANCELLED
+        || statusCode == Code.INTERNAL
+        || statusCode == Code.DEADLINE_EXCEEDED
+        || statusCode == Code.UNKNOWN;
+  }
+
+  private boolean useBackoffForError(Code statusCode, String streamName) {
+    // The default stream uses backoff for INTERNAL, as THROTTLED errors are more likely with
+    // default streams. RESOURCE_EXHAUSTED uses backoff for every stream type.
+    if (isDefaultStreamName(streamName)) {
+      if (statusCode == Code.INTERNAL) {
+        return true;
+      }
+    }
+    return statusCode == Code.RESOURCE_EXHAUSTED;
+  }
+
+  private void doneCallback(Throwable finalStatus) {
+    log.info(
+        "Received done callback. Stream: "
+            + streamName
+            + " worker id: "
+            + writerId
+            + " Final status: "
+            + finalStatus.toString());
+    boolean closedIdleConnection =
+        finalStatus.toString().contains("Closing the stream because it has been inactive");
+    this.lock.lock();
+    try {
+      this.streamConnectionIsConnected = false;
+      this.telemetryMetrics.recordConnectionEnd(
+          Code.values()[Status.fromThrowable(finalStatus).getCode().ordinal()].toString());
+      if (connectionFinalStatus == null) {
+        if (!closedIdleConnection && connectionRetryStartTime == 0) {
+          connectionRetryStartTime = System.currentTimeMillis();
+        }
+        // If the error can be retried, don't set it here; let it try to retry later on.
+        if (isConnectionErrorRetriable(Status.fromThrowable(finalStatus).getCode())
+            && !userClosed
+            && (maxRetryDuration.toMillis() == 0
+                || closedIdleConnection
+                || System.currentTimeMillis() - connectionRetryStartTime
+                    <= maxRetryDuration.toMillis())) {
+          if (!closedIdleConnection) {
+            this.conectionRetryCountWithoutCallback++;
+            this.telemetryMetrics.recordConnectionStartWithRetry();
+          }
+          log.info(
+              "Connection is going to be reestablished with the next request. Retriable error "
+                  + finalStatus.toString()
+                  + " received, retry count "
+                  + conectionRetryCountWithoutCallback
+                  + ", millis left to retry "
+                  + (maxRetryDuration.toMillis()
+                      - (connectionRetryStartTime > 0
+                          ?
System.currentTimeMillis() - connectionRetryStartTime + : 0)) + + ", for stream " + + streamName + + " id:" + + writerId); + } else { + Exceptions.StorageException storageException = Exceptions.toStorageException(finalStatus); + this.connectionFinalStatus = storageException != null ? storageException : finalStatus; + log.info( + "Connection finished with error " + + finalStatus.toString() + + " for stream " + + streamName + + " with write id: " + + writerId + + ", millis left to retry was " + + (maxRetryDuration.toMillis() + - (System.currentTimeMillis() - connectionRetryStartTime))); + } + } + } finally { + this.lock.unlock(); + } + } + + @GuardedBy("lock") + private AppendRequestAndResponse pollInflightRequestQueue(boolean pollLast) { + AppendRequestAndResponse requestWrapper = + pollLast ? inflightRequestQueue.pollLast() : inflightRequestQueue.poll(); + requestWrapper.requestSendTimeStamp = null; + --this.inflightRequests; + this.inflightBytes -= requestWrapper.messageSize; + this.inflightReduced.signal(); + return requestWrapper; + } + + @GuardedBy("lock") + private AppendRequestAndResponse pollLastInflightRequestQueue() { + return pollInflightRequestQueue(/* pollLast= */ true); + } + + @GuardedBy("lock") + private AppendRequestAndResponse pollFirstInflightRequestQueue() { + return pollInflightRequestQueue(/* pollLast= */ false); + } + + /** Thread-safe getter of updated TableSchema */ + synchronized TableSchemaAndTimestamp getUpdatedSchema() { + return this.updatedSchema; + } + + // Class that wraps AppendRowsRequest and its corresponding Response future. + static final class AppendRequestAndResponse { + + final SettableApiFuture appendResult; + final AppendRowsRequest message; + final long messageSize; + // Used to determine the point at which appendLoop is able to process messages from the waiting + // queue. This is used to process errors that support exponential backoff retry. + Instant blockMessageSendDeadline; + + Integer retryCount; + + // Unique identifier for the request. + String requestUniqueId; + ExponentialRetryAlgorithm retryAlgorithm; + + // The writer that issues the call of the request. + final StreamWriter streamWriter; + + TimedAttemptSettings attemptSettings; + + // -1 means the value is not set. + long recordBatchRowCount = -1; + + // Time at which request was last sent over the network. + // If a response is no longer expected this is set back to null. + Instant requestSendTimeStamp; + + AppendRequestAndResponse( + AppendRowsRequest message, + StreamWriter streamWriter, + RetrySettings retrySettings, + String requestUniqueId, + long recordBatchRowCount) { + this.appendResult = SettableApiFuture.create(); + this.message = message; + if (message.hasProtoRows()) { + this.messageSize = message.getProtoRows().getSerializedSize(); + } else if (message.hasArrowRows()) { + this.messageSize = message.getArrowRows().getSerializedSize(); + } else { + this.messageSize = 0; + } + this.streamWriter = streamWriter; + this.requestUniqueId = requestUniqueId; + this.blockMessageSendDeadline = Instant.now(); + this.retryCount = 0; + // To be set after first retry + this.attemptSettings = null; + if (retrySettings != null) { + this.retryAlgorithm = + new ExponentialRetryAlgorithm(retrySettings, NanoClock.getDefaultClock()); + } else { + this.retryAlgorithm = null; + } + this.recordBatchRowCount = recordBatchRowCount; + } + + void setRequestSendQueueTime() { + requestSendTimeStamp = Instant.now(); + } + } + + /** Returns the current workload of this worker. 
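+   * The multiplexing pool compares these snapshots (via {@code Load.LOAD_COMPARATOR}) when
+   * deciding which connection should take the next request.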
+   */
+  public Load getLoad() {
+    return Load.create(
+        inflightBytes,
+        inflightRequests,
+        destinationSet.size(),
+        maxInflightBytes,
+        maxInflightRequests);
+  }
+
+  /**
+   * Represents the current workload of this worker. Used by the multiplexing algorithm to
+   * determine the distribution of requests.
+   */
+  @AutoValue
+  public abstract static class Load {
+
+    // Consider the load on this worker to be overwhelmed when above some percentage of
+    // in-flight bytes or in-flight request count.
+    private static double overwhelmedInflightCount = 0.2;
+    private static double overwhelmedInflightBytes = 0.2;
+
+    // Number of in-flight request bytes in the worker.
+    abstract long inFlightRequestsBytes();
+
+    // Number of in-flight requests in the worker.
+    abstract long inFlightRequestsCount();
+
+    // Number of destinations handled by this worker.
+    abstract long destinationCount();
+
+    // Max number of in-flight bytes allowed.
+    abstract long maxInflightBytes();
+
+    // Max number of in-flight requests allowed.
+    abstract long maxInflightCount();
+
+    static Load create(
+        long inFlightRequestsBytes,
+        long inFlightRequestsCount,
+        long destinationCount,
+        long maxInflightBytes,
+        long maxInflightCount) {
+      return new AutoValue_ConnectionWorker_Load(
+          inFlightRequestsBytes,
+          inFlightRequestsCount,
+          destinationCount,
+          maxInflightBytes,
+          maxInflightCount);
+    }
+
+    boolean isOverwhelmed() {
+      // Consider only in-flight bytes and count for now; by experiment those two are the most
+      // efficient signals and have great simplicity.
+      return inFlightRequestsCount() > overwhelmedInflightCount * maxInflightCount()
+          || inFlightRequestsBytes() > overwhelmedInflightBytes * maxInflightBytes();
+    }
+
+    // Compares two loads. First compare in-flight request bytes, split into 1024-byte buckets.
+    // Then compare the in-flight request count.
+    // Then compare the destination count of the two connections.
+    public static final Comparator<Load> LOAD_COMPARATOR =
+        Comparator.comparing((Load key) -> (int) (key.inFlightRequestsBytes() / 1024))
+            .thenComparing((Load key) -> (int) (key.inFlightRequestsCount() / 100))
+            .thenComparing(Load::destinationCount);
+
+    // Compares two loads without bucketing; used in smaller-scale unit testing.
+    public static final Comparator<Load> TEST_LOAD_COMPARATOR =
+        Comparator.comparing((Load key) -> (int) key.inFlightRequestsBytes())
+            .thenComparing((Load key) -> (int) key.inFlightRequestsCount())
+            .thenComparing(Load::destinationCount);
+
+    @VisibleForTesting
+    public static void setOverwhelmedBytesThreshold(double newThreshold) {
+      overwhelmedInflightBytes = newThreshold;
+    }
+
+    @VisibleForTesting
+    public static void setOverwhelmedCountsThreshold(double newThreshold) {
+      overwhelmedInflightCount = newThreshold;
+    }
+  }
+
+  @VisibleForTesting
+  static void setMaxInflightQueueWaitTime(long waitTime) {
+    INFLIGHT_QUOTA_MAX_WAIT_TIME_MILLI = waitTime;
+  }
+
+  @VisibleForTesting
+  static void setMaxInflightRequestWaitTime(Duration waitTime) {
+    MAXIMUM_REQUEST_CALLBACK_WAIT_TIME = waitTime;
+  }
+
+  @AutoValue
+  abstract static class TableSchemaAndTimestamp {
+
+    // The timestamp at which the updated schema was reported in a response.
+    abstract long updateTimeStamp();
+
+    // The updated schema returned from the server.
+    abstract TableSchema updatedSchema();
+
+    static TableSchemaAndTimestamp create(long updateTimeStamp, TableSchema updatedSchema) {
+      return new AutoValue_ConnectionWorker_TableSchemaAndTimestamp(updateTimeStamp, updatedSchema);
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool.java
new file mode 100644
index 000000000000..baec9a8ee460
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool.java
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
+import com.google.api.gax.batching.FlowController;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.auto.value.AutoValue;
+import com.google.cloud.bigquery.storage.v1.AppendFormats.AppendRowsData;
+import com.google.cloud.bigquery.storage.v1.ConnectionWorker.Load;
+import com.google.cloud.bigquery.storage.v1.ConnectionWorker.TableSchemaAndTimestamp;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.MoreExecutors;
+import io.opentelemetry.api.common.Attributes;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import javax.annotation.Nullable;
+import javax.annotation.concurrent.GuardedBy;
+
+/** Pool of connections to accept appends and distribute them to different connections. */
+public class ConnectionWorkerPool {
+  static final Pattern STREAM_NAME_PATTERN =
+      Pattern.compile("projects/([^/]+)/datasets/([^/]+)/tables/([^/]+)/streams/([^/]+)");
+
+  private static final Logger log = Logger.getLogger(ConnectionWorkerPool.class.getName());
+
+  /*
+   * Max allowed inflight requests in the stream. Method append is blocked at this.
+   */
+  private final long maxInflightRequests;
+
+  /*
+   * Max allowed inflight bytes in the stream. Method append is blocked at this.
+   */
+  private final long maxInflightBytes;
+
+  /*
+   * Max retry duration for retryable errors.
+   */
+  private final java.time.Duration maxRetryDuration;
+
+  /*
+   * Retry settings for in-stream retries.
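+   * Passed through to each ConnectionWorker this pool creates; a null value disables
+   * in-stream retries.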
+   */
+  private RetrySettings retrySettings;
+
+  /*
+   * Behavior when the inflight queue is exceeded. Only supports Block or Throw; the default is
+   * Block.
+   */
+  private final FlowController.LimitExceededBehavior limitExceededBehavior;
+
+  /** Map from write stream to corresponding connection. */
+  private final Map<StreamWriter, ConnectionWorker> streamWriterToConnection = new HashMap<>();
+
+  /** Map from a connection to the set of write streams that have sent requests onto it. */
+  private final Map<ConnectionWorker, Set<StreamWriter>> connectionToWriteStream = new HashMap<>();
+
+  /** Collection of all the created connections. */
+  private final Set<ConnectionWorker> connectionWorkerPool =
+      Collections.synchronizedSet(new HashSet<>());
+
+  /*
+   * Contains the mapping from stream name to updated schema.
+   */
+  private Map<String, TableSchemaAndTimestamp> tableNameToUpdatedSchema =
+      new ConcurrentHashMap<>();
+
+  /** Enable test related logic. */
+  private static boolean enableTesting = false;
+
+  /*
+   * The compression to use for the calls.
+   */
+  private String compressorName;
+
+  /** Used for testing the number of times createConnectionWorker is called. */
+  private final AtomicInteger testValueCreateConnectionCount = new AtomicInteger(0);
+
+  /*
+   * Tracks current inflight requests in the stream.
+   */
+  @GuardedBy("lock")
+  private long inflightRequests = 0;
+
+  /*
+   * Tracks current inflight bytes in the stream.
+   */
+  @GuardedBy("lock")
+  private long inflightBytes = 0;
+
+  /*
+   * Tracks how often the stream was closed due to a retriable error. Streaming will stop when
+   * the count hits a threshold. Streaming should only be halted if it isn't possible to
+   * establish a connection. Keeps track of the number of reconnections in succession. This is
+   * reset when a row is successfully called back.
+   */
+  @GuardedBy("lock")
+  private long conectionRetryCountWithoutCallback = 0;
+
+  /*
+   * If false, streamConnection needs to be reset.
+   */
+  @GuardedBy("lock")
+  private boolean streamConnectionIsConnected = false;
+
+  /*
+   * A boolean to track whether we cleaned up the inflight queue.
+   */
+  @GuardedBy("lock")
+  private boolean inflightCleanuped = false;
+
+  /*
+   * Indicates whether the user has called Close() or not.
+   */
+  @GuardedBy("lock")
+  private boolean userClosed = false;
+
+  /*
+   * The final status of the connection. Set to nonnull when the connection is permanently
+   * closed.
+   */
+  @GuardedBy("lock")
+  private Throwable connectionFinalStatus = null;
+
+  /*
+   * Contains the updated TableSchema.
+   */
+  @GuardedBy("lock")
+  private TableSchema updatedSchema;
+
+  /*
+   * A client used to interact with BigQuery.
+   */
+  private BigQueryWriteSettings clientSettings;
+
+  /**
+   * The current maximum connection count. This value is gradually increased until the
+   * user-defined maximum connection count.
+   */
+  private int currentMaxConnectionCount;
+
+  /** Lock for controlling concurrent operation on add / delete connections. */
+  private final Lock lock = new ReentrantLock();
+
+  /** Settings for connection pool. */
+  @AutoValue
+  public abstract static class Settings {
+    /**
+     * The minimum number of connections each pool creates before trying to reuse a previously
+     * created connection in multiplexing mode.
+     */
+    abstract int minConnectionsPerRegion();
+
+    /** The maximum connections per connection pool. */
+    abstract int maxConnectionsPerRegion();
+
+    public static Builder builder() {
+      return new AutoValue_ConnectionWorkerPool_Settings.Builder()
+          .setMinConnectionsPerRegion(2)
+          .setMaxConnectionsPerRegion(20);
+    }
+
+    /** Builder for the options to configure {@link ConnectionWorkerPool}. */
+    @AutoValue.Builder
+    public abstract static class Builder {
+      // TODO(gaole) rename to per location for easier understanding.
+      public abstract Builder setMinConnectionsPerRegion(int value);
+
+      public abstract Builder setMaxConnectionsPerRegion(int value);
+
+      public abstract Settings build();
+    }
+  }
+
+  /** Static setting for connection pool. */
+  private static Settings settings = Settings.builder().build();
+
+  private final boolean enableRequestProfiler;
+  private final boolean enableOpenTelemetry;
+
+  ConnectionWorkerPool(
+      long maxInflightRequests,
+      long maxInflightBytes,
+      java.time.Duration maxRetryDuration,
+      FlowController.LimitExceededBehavior limitExceededBehavior,
+      @Nullable String compressorName,
+      BigQueryWriteSettings clientSettings,
+      RetrySettings retrySettings,
+      boolean enableRequestProfiler,
+      boolean enableOpenTelemetry) {
+    this.maxInflightRequests = maxInflightRequests;
+    this.maxInflightBytes = maxInflightBytes;
+    this.maxRetryDuration = maxRetryDuration;
+    this.limitExceededBehavior = limitExceededBehavior;
+    this.compressorName = compressorName;
+    this.clientSettings = clientSettings;
+    this.currentMaxConnectionCount = settings.minConnectionsPerRegion();
+    this.retrySettings = retrySettings;
+    this.enableRequestProfiler = enableRequestProfiler;
+    this.enableOpenTelemetry = enableOpenTelemetry;
+  }
+
+  /**
+   * Sets static connection pool options.
+   *
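+   * <p>Illustrative usage (these values are placeholders, not recommendations):
+   *
+   * <pre>{@code
+   * ConnectionWorkerPool.setOptions(
+   *     ConnectionWorkerPool.Settings.builder()
+   *         .setMinConnectionsPerRegion(4)
+   *         .setMaxConnectionsPerRegion(10)
+   *         .build());
+   * }</pre>
+   *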
+   * <p>Note: this method should be called prior to the construction of the connection pool.
+   */
+  public static void setOptions(Settings settings) {
+    ConnectionWorkerPool.settings = settings;
+  }
+
+  ConnectionWorker getConnectionWorker(StreamWriter streamWriter) {
+    ConnectionWorker connectionWorker;
+    lock.lock();
+    try {
+      connectionWorker =
+          streamWriterToConnection.compute(
+              streamWriter,
+              (key, existingStream) -> {
+                // Stick to the existing stream if it's not overwhelmed.
+                if (existingStream != null
+                    && !existingStream.getLoad().isOverwhelmed()
+                    && !existingStream.isConnectionInUnrecoverableState()) {
+                  return existingStream;
+                }
+                if (existingStream != null && existingStream.isConnectionInUnrecoverableState()) {
+                  existingStream = null;
+                }
+                // Before searching for the next connection to attach, first clear the finalized
+                // connections so that they will not be selected.
+                clearFinalizedConnectionWorker();
+                // Try to create or find another existing stream to reuse.
+                ConnectionWorker createdOrExistingConnection = null;
+                try {
+                  createdOrExistingConnection =
+                      createOrReuseConnectionWorker(streamWriter, existingStream);
+                } catch (IOException e) {
+                  throw new IllegalStateException(e);
+                }
+                // Update the connection-to-write-stream relationship.
+                connectionToWriteStream.computeIfAbsent(
+                    createdOrExistingConnection, (ConnectionWorker k) -> new HashSet<>());
+                connectionToWriteStream.get(createdOrExistingConnection).add(streamWriter);
+                return createdOrExistingConnection;
+              });
+    } finally {
+      lock.unlock();
+    }
+    return connectionWorker;
+  }
+
+  /** Distributes the writing of a message to an underlying connection. */
+  ApiFuture<AppendRowsResponse> append(
+      StreamWriter streamWriter, AppendRowsData rows, long offset, String uniqueRequestId) {
+    // We are in multiplexing mode after entering the following logic.
+    ConnectionWorker connectionWorker = getConnectionWorker(streamWriter);
+    Stopwatch stopwatch = Stopwatch.createStarted();
+    ApiFuture<AppendRowsResponse> responseFuture =
+        connectionWorker.append(streamWriter, rows, offset, uniqueRequestId);
+    return ApiFutures.transform(
+        responseFuture,
+        // Add a callback to pick up any updated schema.
+        (response) -> {
+          if (!response.getWriteStream().isEmpty() && response.hasUpdatedSchema()) {
+            tableNameToUpdatedSchema.put(
+                response.getWriteStream(),
+                TableSchemaAndTimestamp.create(System.nanoTime(), response.getUpdatedSchema()));
+          }
+          return response;
+        },
+        MoreExecutors.directExecutor());
+  }
+
+  @VisibleForTesting
+  Attributes getTelemetryAttributes(StreamWriter streamWriter) {
+    ConnectionWorker connectionWorker = getConnectionWorker(streamWriter);
+    return connectionWorker.getTelemetryAttributes();
+  }
+
+  /**
+   * Create a new connection if we haven't reached the current maximum, or reuse an existing
+   * connection with the least load.
+   */
+  private ConnectionWorker createOrReuseConnectionWorker(
+      StreamWriter streamWriter, ConnectionWorker existingConnectionWorker) throws IOException {
+    String streamReference = streamWriter.getStreamName();
+    if (connectionWorkerPool.size() < currentMaxConnectionCount) {
+      // Always create a new connection if we haven't reached the current maximum.
+      return createConnectionWorker(
+          streamWriter.getStreamName(),
+          streamWriter.getLocation(),
+          streamWriter.getWriterSchema(),
+          streamWriter.getFullTraceId());
+    } else {
+      ConnectionWorker existingBestConnection =
+          pickBestLoadConnection(
+              enableTesting ?
+                  Load.TEST_LOAD_COMPARATOR : Load.LOAD_COMPARATOR,
+              ImmutableList.copyOf(connectionWorkerPool));
+      if (!existingBestConnection.getLoad().isOverwhelmed()) {
+        return existingBestConnection;
+      } else if (currentMaxConnectionCount < settings.maxConnectionsPerRegion()) {
+        // At this point, we have reached the connection cap and the selected connection is
+        // overwhelmed, so we can try to scale up the connection pool.
+        // The connection count goes up one by one until `maxConnectionsPerRegion` is reached.
+        currentMaxConnectionCount += 1;
+        if (currentMaxConnectionCount > settings.maxConnectionsPerRegion()) {
+          currentMaxConnectionCount = settings.maxConnectionsPerRegion();
+        }
+        return createConnectionWorker(
+            streamWriter.getStreamName(),
+            streamWriter.getLocation(),
+            streamWriter.getWriterSchema(),
+            streamWriter.getFullTraceId());
+      } else {
+        // Stick to the original connection if all the connections are overwhelmed.
+        if (existingConnectionWorker != null) {
+          return existingConnectionWorker;
+        }
+        // If we are at this branch, it means we reached the maximum connections.
+        return existingBestConnection;
+      }
+    }
+  }
+
+  private void clearFinalizedConnectionWorker() {
+    Set<ConnectionWorker> connectionWorkerSet = new HashSet<>();
+    for (ConnectionWorker existingWorker : connectionWorkerPool) {
+      if (existingWorker.isConnectionInUnrecoverableState()) {
+        connectionWorkerSet.add(existingWorker);
+      }
+    }
+    for (ConnectionWorker workerToRemove : connectionWorkerSet) {
+      connectionWorkerPool.remove(workerToRemove);
+    }
+  }
+
+  /** Selects the best connection worker among the given connection workers. */
+  static ConnectionWorker pickBestLoadConnection(
+      Comparator<Load> comparator, List<ConnectionWorker> connectionWorkerList) {
+    if (connectionWorkerList.isEmpty()) {
+      throw new IllegalStateException(
+          String.format(
+              "Bug in code! At least one connection worker should be passed in "
+                  + "pickBestLoadConnection(...)"));
+    }
+    // Loop over all connection workers and find the one with the smallest load.
+    int currentBestIndex = 0;
+    Load currentBestLoad = connectionWorkerList.get(currentBestIndex).getLoad();
+    for (int i = 1; i < connectionWorkerList.size(); i++) {
+      Load loadToCompare = connectionWorkerList.get(i).getLoad();
+      if (comparator.compare(loadToCompare, currentBestLoad) <= 0) {
+        currentBestIndex = i;
+        currentBestLoad = loadToCompare;
+      }
+    }
+    return connectionWorkerList.get(currentBestIndex);
+  }
+
+  /**
+   * Creates a single connection worker.
+   *
+   * <p>Note: this function needs to be thread-safe across different stream references, but not
+   * for a single stream reference, because createConnectionWorker(...) is called via
+   * computeIfAbsent(...), which runs at most once per key.
+   */
+  private ConnectionWorker createConnectionWorker(
+      String streamName,
+      String location,
+      AppendFormats.AppendRowsSchema writeSchema,
+      String fullTraceId)
+      throws IOException {
+    if (enableTesting) {
+      // Though the atomic integer is lightweight, keep it behind the enableTesting check in
+      // case more logic is added here.
+      testValueCreateConnectionCount.getAndIncrement();
+    }
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            streamName,
+            location,
+            writeSchema,
+            maxInflightRequests,
+            maxInflightBytes,
+            maxRetryDuration,
+            limitExceededBehavior,
+            fullTraceId,
+            compressorName,
+            clientSettings,
+            retrySettings,
+            enableRequestProfiler,
+            enableOpenTelemetry,
+            /* isMultiplexing= */ true);
+    connectionWorkerPool.add(connectionWorker);
+    log.info(
+        String.format(
+            "Scaling up new connection for stream name: %s, pool size after scaling up %d",
+            streamName, connectionWorkerPool.size()));
+    return connectionWorker;
+  }
+
+  /**
+   * Reports the close of the given write stream.
+   *
+   * <p>The corresponding worker is not closed until no stream reference targets that worker.
+   */
+  void close(StreamWriter streamWriter) {
+    lock.lock();
+    try {
+      streamWriterToConnection.remove(streamWriter);
+      // Since other connections may have served this writeStream, iterate and see whether it's
+      // also fine to close those connections.
+      Set<ConnectionWorker> connectionToRemove = new HashSet<>();
+      for (ConnectionWorker connectionWorker : connectionToWriteStream.keySet()) {
+        if (connectionToWriteStream.containsKey(connectionWorker)) {
+          connectionToWriteStream.get(connectionWorker).remove(streamWriter);
+          if (connectionToWriteStream.get(connectionWorker).isEmpty()) {
+            connectionWorker.close();
+            connectionWorkerPool.remove(connectionWorker);
+            connectionToRemove.add(connectionWorker);
+          }
+        }
+      }
+      log.info(
+          String.format(
+              "During closing of writeStream for %s with writer id %s, we decided to close %s "
                  + "connections, pool size after removal %s",
+              streamWriter.getStreamName(),
+              streamWriter.getWriterId(),
+              connectionToRemove.size(),
+              connectionToWriteStream.size() - connectionToRemove.size()));
+      connectionToWriteStream.keySet().removeAll(connectionToRemove);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /** Fetches the inflight wait seconds from the corresponding worker. */
+  long getInflightWaitSeconds(StreamWriter streamWriter) {
+    lock.lock();
+    try {
+      ConnectionWorker connectionWorker = streamWriterToConnection.get(streamWriter);
+      if (connectionWorker == null) {
+        return 0;
+      } else {
+        return connectionWorker.getInflightWaitSeconds();
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  TableSchemaAndTimestamp getUpdatedSchema(StreamWriter streamWriter) {
+    return tableNameToUpdatedSchema.getOrDefault(streamWriter.getStreamName(), null);
+  }
+
+  /** Enable test related logic. */
+  @VisibleForTesting
+  static void enableTestingLogic() {
+    enableTesting = true;
+  }
+
+  /** Returns how many times createConnectionWorker(...) is called. */
+  int getCreateConnectionCount() {
+    return testValueCreateConnectionCount.get();
+  }
+
+  int getTotalConnectionCount() {
+    return connectionWorkerPool.size();
+  }
+
+  FlowController.LimitExceededBehavior limitExceededBehavior() {
+    return limitExceededBehavior;
+  }
+
+  BigQueryWriteSettings bigQueryWriteSettings() {
+    return clientSettings;
+  }
+
+  static String toTableName(String streamName) {
+    Matcher matcher = STREAM_NAME_PATTERN.matcher(streamName);
+    Preconditions.checkArgument(matcher.matches(), "Invalid stream name: %s.", streamName);
+    return "projects/"
+        + matcher.group(1)
+        + "/datasets/"
+        + matcher.group(2)
+        + "/tables/"
+        + matcher.group(3);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java
new file mode 100644
index 000000000000..012795f5421f
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.gax.grpc.GrpcStatusCode;
+import com.google.common.collect.ImmutableMap;
+import com.google.protobuf.Any;
+import com.google.protobuf.InvalidProtocolBufferException;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import io.grpc.protobuf.StatusProto;
+import java.time.Duration;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import javax.annotation.Nullable;
+
+/** Exceptions for Storage Client Libraries. */
+public final class Exceptions {
+  /** Main Storage Exception. Might contain a map of streams to errors for that stream. */
+  public static class StorageException extends StatusRuntimeException {
+
+    private final ImmutableMap<String, GrpcStatusCode> errors;
+    private final String streamName;
+    private final Long expectedOffset;
+    private final Long actualOffset;
+
+    private StorageException() {
+      this(null, null, null, null, ImmutableMap.of());
+    }
+
+    private StorageException(
+        @Nullable Status grpcStatus,
+        @Nullable String streamName,
+        @Nullable Long expectedOffset,
+        @Nullable Long actualOffset,
+        ImmutableMap<String, GrpcStatusCode> errors) {
+      super(grpcStatus);
+      this.streamName = streamName;
+      this.expectedOffset = expectedOffset;
+      this.actualOffset = actualOffset;
+      this.errors = errors;
+    }
+
+    public ImmutableMap<String, GrpcStatusCode> getErrors() {
+      return errors;
+    }
+
+    public String getStreamName() {
+      return streamName;
+    }
+
+    public long getExpectedOffset() {
+      return expectedOffset;
+    }
+
+    public long getActualOffset() {
+      return actualOffset;
+    }
+  }
+
+  /**
+   * The write stream has already been finalized and will not accept further appends or flushes.
+   * To send additional requests, you will need to create a new write stream via
+   * CreateWriteStream.
+   */
+  public static final class StreamFinalizedException extends StorageException {
+    protected StreamFinalizedException(Status grpcStatus, String name) {
+      super(grpcStatus, name, null, null, ImmutableMap.of());
+    }
+  }
+
+  /**
+   * There was a schema mismatch due to the BigQuery table having fewer fields than the input
+   * message. This can be resolved by updating the table's schema with the message schema.
+   */
+  public static final class SchemaMismatchedException extends StorageException {
+    protected SchemaMismatchedException(Status grpcStatus, String name) {
+      super(grpcStatus, name, null, null, ImmutableMap.of());
+    }
+  }
+
+  /**
+   * Offset already exists. This indicates that the append request attempted to write data to an
+   * offset before the current end of the stream. This is an expected exception when exactly-once
+   * semantics are enforced. You can safely ignore it, and keep appending until there is new data
+   * to append.
+   */
+  public static final class OffsetAlreadyExists extends StorageException {
+    protected OffsetAlreadyExists(
+        Status grpcStatus, String name, Long expectedOffset, Long actualOffset) {
+      super(grpcStatus, name, expectedOffset, actualOffset, ImmutableMap.of());
+    }
+  }
+
+  /**
+   * Offset out of range. This indicates that the append request is attempting to write data to a
+   * point beyond the current end of the stream.
+   * To append data successfully, you must either specify the offset corresponding to the
+   * current end of stream, or omit the offset from the append request. This usually means a bug
+   * in your code that introduced a gap in appends.
+   */
+  public static final class OffsetOutOfRange extends StorageException {
+    protected OffsetOutOfRange(
+        Status grpcStatus, String name, Long expectedOffset, Long actualOffset) {
+      super(grpcStatus, name, expectedOffset, actualOffset, ImmutableMap.of());
+    }
+  }
+
+  /**
+   * The stream is not found. Possible causes include incorrectly specifying the stream
+   * identifier or attempting to use an old stream identifier that no longer exists. You can
+   * invoke CreateWriteStream to create a new stream.
+   */
+  public static final class StreamNotFound extends StorageException {
+    protected StreamNotFound(Status grpcStatus, String name) {
+      super(grpcStatus, name, null, null, ImmutableMap.of());
+    }
+  }
+
+  private static StorageError toStorageError(com.google.rpc.Status rpcStatus) {
+    for (Any detail : rpcStatus.getDetailsList()) {
+      if (detail.is(StorageError.class)) {
+        try {
+          return detail.unpack(StorageError.class);
+        } catch (InvalidProtocolBufferException protoException) {
+          throw new IllegalStateException(protoException);
+        }
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Converts a c.g.rpc.Status into a StorageException, if possible. Examines the embedded
+   * StorageError, and potentially returns a {@link StreamFinalizedException} or {@link
+   * SchemaMismatchedException} (both derive from StorageException). If there is no StorageError,
+   * or the StorageError is a different error, it will return null.
+   */
+  @Nullable
+  public static StorageException toStorageException(
+      com.google.rpc.Status rpcStatus, Throwable exception) {
+    if (rpcStatus == null) {
+      return null;
+    }
+    StorageError error = toStorageError(rpcStatus);
+    Status grpcStatus =
+        Status.fromCodeValue(rpcStatus.getCode()).withDescription(rpcStatus.getMessage());
+    if (error == null) {
+      return null;
+    }
+    String streamName = error.getEntity();
+    // The error message should have Entity but it's missing from the message for
+    // OFFSET_ALREADY_EXISTS.
+    // TODO: Simplify the logic below when the backend fixes passing Entity for
+    // OFFSET_ALREADY_EXISTS errors.
+    String errorMessage =
+        error.getErrorMessage().indexOf("Entity") > 0
+            ? error.getErrorMessage().substring(0, error.getErrorMessage().indexOf("Entity")).trim()
+            : error.getErrorMessage().trim();
+
+    // Ensure that the error message has the expected pattern for parsing.
+    String errorMessagePatternString = "expected offset [0-9]+, received [0-9]+";
+    Pattern errorMessagePattern = Pattern.compile(errorMessagePatternString);
+    Matcher errorMessageMatcher = errorMessagePattern.matcher(errorMessage);
+
+    Long expectedOffset;
+    Long actualOffset;
+    if (!errorMessageMatcher.find()) {
+      expectedOffset = -1L;
+      actualOffset = -1L;
+    } else {
+      expectedOffset =
+          Long.parseLong(
+              errorMessage.substring(
+                  errorMessage.lastIndexOf("offset") + 7, errorMessage.lastIndexOf(",")));
+      actualOffset = Long.parseLong(errorMessage.substring(errorMessage.lastIndexOf(" ") + 1));
+    }
+    switch (error.getCode()) {
+      case STREAM_FINALIZED:
+        return new StreamFinalizedException(grpcStatus, streamName);
+
+      case STREAM_NOT_FOUND:
+        return new StreamNotFound(grpcStatus, streamName);
+
+      case SCHEMA_MISMATCH_EXTRA_FIELDS:
+        return new SchemaMismatchedException(grpcStatus, streamName);
+
+      case OFFSET_OUT_OF_RANGE:
+        return new OffsetOutOfRange(grpcStatus, streamName, expectedOffset, actualOffset);
+
+      case OFFSET_ALREADY_EXISTS:
+        return new OffsetAlreadyExists(grpcStatus, streamName, expectedOffset, actualOffset);
+
+      default:
+        return null;
+    }
+  }
+
+  /**
+   * Converts a Throwable into a StorageException, if possible. Examines the embedded error
+   * message, and potentially returns a {@link StreamFinalizedException} or {@link
+   * SchemaMismatchedException} (both derive from StorageException). If there is no StorageError,
+   * or the StorageError is a different error, it will return null.
+   */
+  @Nullable
+  public static StorageException toStorageException(Throwable exception) {
+    com.google.rpc.Status rpcStatus = StatusProto.fromThrowable(exception);
+    return toStorageException(rpcStatus, exception);
+  }
+
+  /**
+   * This class has a typo in the name. It will be removed soon. Please use {@link
+   * AppendSerializationError}.
+   */
+  public static class AppendSerializtionError extends StatusRuntimeException {
+    private final Map<Integer, String> rowIndexToErrorMessage;
+    private final String streamName;
+
+    public AppendSerializtionError(
+        int codeValue,
+        String description,
+        String streamName,
+        Map<Integer, String> rowIndexToErrorMessage) {
+      super(Status.fromCodeValue(codeValue).withDescription(description));
+      this.rowIndexToErrorMessage = rowIndexToErrorMessage;
+      this.streamName = streamName;
+    }
+
+    public Map<Integer, String> getRowIndexToErrorMessage() {
+      return rowIndexToErrorMessage;
+    }
+
+    public String getStreamName() {
+      return streamName;
+    }
+  }
+
+  /**
+   * This exception is thrown from {@link SchemaAwareStreamWriter#append(Iterable)} when the
+   * client-side Proto serialization fails. It can also be thrown by the server in case the rows
+   * contain invalid data. The exception contains a Map of indexes of faulty rows and the
+   * corresponding error message.
+   */
+  public static class AppendSerializationError extends AppendSerializtionError {
+
+    public AppendSerializationError(
+        int codeValue,
+        String description,
+        String streamName,
+        Map<Integer, String> rowIndexToErrorMessage) {
+      super(codeValue, description, streamName, rowIndexToErrorMessage);
+    }
+  }
+
+  /** This exception is thrown from the proto converter to wrap the row-index-to-error mapping.
*/ + static class RowIndexToErrorException extends IllegalArgumentException { + Map rowIndexToErrorMessage; + + boolean hasDataUnknownError; + + public RowIndexToErrorException( + Map rowIndexToErrorMessage, boolean hasDataUnknownError) { + this.rowIndexToErrorMessage = rowIndexToErrorMessage; + this.hasDataUnknownError = hasDataUnknownError; + } + + // This message should not be exposed to the user directly. + // Please examine individual row's error through `rowIndexToErrorMessage`. + public String getMessage() { + return "The map of row index to error message is " + rowIndexToErrorMessage.toString(); + } + + public boolean hasDataUnknownError() { + return hasDataUnknownError; + } + } + + /** This exception is used internally to handle field level parsing errors. */ + public static class FieldParseError extends IllegalArgumentException { + private final String fieldName; + private final String bqType; + private final Throwable cause; + + protected FieldParseError(String fieldName, String bqType, Throwable cause) { + this.fieldName = fieldName; + this.bqType = bqType; + this.cause = cause; + } + + public String getFieldName() { + return fieldName; + } + + public String getBqType() { + return bqType; + } + + public Throwable getCause() { + return cause; + } + + public String getMessage() { + return cause.getMessage(); + } + } + + /** + * This writer instance has either been closed by the user explicitly, or has encountered + * non-retriable errors. + * + *
+   * <p>To continue to write to the same stream, you will need to create a new writer instance.
+   */
+  public static final class StreamWriterClosedException extends StatusRuntimeException {
+    private final String streamName;
+    private final String writerId;
+
+    protected StreamWriterClosedException(Status grpcStatus, String streamName, String writerId) {
+      super(grpcStatus);
+      this.streamName = streamName;
+      this.writerId = writerId;
+    }
+
+    public String getStreamName() {
+      return streamName;
+    }
+
+    public String getWriterId() {
+      return writerId;
+    }
+  }
+
+  /**
+   * If FlowController.LimitExceededBehavior is set to Block and the inflight limit is exceeded,
+   * this exception will be thrown. If it is just a spike, you may retry the request. Otherwise,
+   * you can increase the inflight limit or create more StreamWriters to handle your traffic.
+   */
+  public static class InflightLimitExceededException extends StatusRuntimeException {
+    private final long currentLimit;
+    private final String writerId;
+
+    protected InflightLimitExceededException(
+        Status grpcStatus, String writerId, long currentLimit) {
+      super(grpcStatus);
+      this.currentLimit = currentLimit;
+      this.writerId = writerId;
+    }
+
+    public String getWriterId() {
+      return writerId;
+    }
+
+    public long getCurrentLimit() {
+      return currentLimit;
+    }
+  }
+
+  public static class InflightRequestsLimitExceededException
+      extends InflightLimitExceededException {
+    protected InflightRequestsLimitExceededException(String writerId, long currentLimit) {
+      super(
+          Status.fromCode(Status.Code.RESOURCE_EXHAUSTED)
+              .withDescription(
+                  "Exceeded the client-side inflight request buffer; consider adding more buffer"
+                      + " or opening more connections. Current limit: "
+                      + currentLimit),
+          writerId,
+          currentLimit);
+    }
+  }
+
+  public static class InflightBytesLimitExceededException extends InflightLimitExceededException {
+    protected InflightBytesLimitExceededException(String writerId, long currentLimit) {
+      super(
+          Status.fromCode(Status.Code.RESOURCE_EXHAUSTED)
+              .withDescription(
+                  "Exceeded the client-side inflight byte buffer; consider adding more buffer or"
+                      + " opening more connections. Current limit: "
+                      + currentLimit),
+          writerId,
+          currentLimit);
+    }
+  }
+
+  /**
+   * This class is replaced by a generic one. It will be removed soon. Please use {@link
+   * DataHasUnknownFieldException}.
+   */
+  public static final class JsonDataHasUnknownFieldException extends DataHasUnknownFieldException {
+    protected JsonDataHasUnknownFieldException(String jsonFieldName) {
+      super(jsonFieldName);
+    }
+  }
+
+  /**
+   * The input data object has a field unknown to the schema of the SchemaAwareStreamWriter.
+   * Users can either turn on the IgnoreUnknownFields option on the SchemaAwareStreamWriter, or,
+   * if they don't want the error to be ignored, recreate the SchemaAwareStreamWriter with the
+   * updated table schema.
+   */
+  public static class DataHasUnknownFieldException extends IllegalArgumentException {
+    private final String jsonFieldName;
+
+    public DataHasUnknownFieldException(String jsonFieldName) {
+      super(String.format("The source object has fields unknown to BigQuery: %s.", jsonFieldName));
+      this.jsonFieldName = jsonFieldName;
+    }
+
+    public String getFieldName() {
+      return jsonFieldName;
+    }
+  }
+
+  /**
+   * The connection was shut down because a callback was not received within the maximum wait
+   * time.
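+   * The limit defaults to 15 minutes and can be adjusted via
+   * {@code JsonStreamWriter.setMaxRequestCallbackWaitTime(Duration)}.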
+   */
+  public static class MaximumRequestCallbackWaitTimeExceededException extends RuntimeException {
+    private final Duration callbackWaitTime;
+    private final String writerId;
+    private final Duration callbackWaitTimeLimit;
+
+    public MaximumRequestCallbackWaitTimeExceededException(
+        Duration callbackWaitTime, String writerId, Duration callbackWaitTimeLimit) {
+      super(
+          String.format(
+              "Request has waited in the inflight queue for %s for writer %s, "
+                  + "which is over the maximum wait time %s",
+              callbackWaitTime, writerId, callbackWaitTimeLimit.toString()));
+      this.callbackWaitTime = callbackWaitTime;
+      this.writerId = writerId;
+      this.callbackWaitTimeLimit = callbackWaitTimeLimit;
+    }
+
+    public Duration getCallbackWaitTime() {
+      return callbackWaitTime;
+    }
+
+    public String getWriterId() {
+      return writerId;
+    }
+
+    public Duration getCallbackWaitTimeLimit() {
+      return callbackWaitTimeLimit;
+    }
+  }
+
+  private Exceptions() {}
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java
new file mode 100644
index 000000000000..1dcba090f095
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.gax.batching.FlowControlSettings;
+import com.google.api.gax.core.CredentialsProvider;
+import com.google.api.gax.core.ExecutorProvider;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.gson.JsonArray;
+import com.google.protobuf.Descriptors;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Map;
+import org.json.JSONArray;
+
+/**
+ * A StreamWriter that can write JSON data (JSONObjects) to BigQuery tables. The JsonStreamWriter
+ * is built on top of a StreamWriter, and it simply converts all JSON data to protobuf messages
+ * and then calls StreamWriter's append() method to write to BigQuery tables. It maintains all
+ * StreamWriter functions, but also provides an additional feature: schema update support, where
+ * if the BigQuery table schema is updated, users will be able to ingest data on the new schema
+ * after some time (on the order of minutes).
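+ *
+ * <p>A minimal usage sketch (illustrative only: {@code tableName} and {@code client} are
+ * placeholders, and exception handling is elided):
+ *
+ * <pre>{@code
+ * try (JsonStreamWriter writer = JsonStreamWriter.newBuilder(tableName, client).build()) {
+ *   JSONArray rows = new JSONArray().put(new JSONObject().put("col", "value"));
+ *   ApiFuture<AppendRowsResponse> future = writer.append(rows);
+ *   AppendRowsResponse response = future.get();
+ * }
+ * }</pre>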
+ */ +public class JsonStreamWriter implements AutoCloseable { + private final SchemaAwareStreamWriter schemaAwareStreamWriter; + private static final String CLIENT_ID = "java-jsonwriter"; + + /** + * Constructs the JsonStreamWriter + * + * @param builder The Builder object for the JsonStreamWriter + */ + private JsonStreamWriter(SchemaAwareStreamWriter.Builder builder) + throws Descriptors.DescriptorValidationException, + IllegalArgumentException, + IOException, + InterruptedException { + this.schemaAwareStreamWriter = builder.build(); + } + + /** + * Writes a JSONArray that contains JSONObjects to the BigQuery table by first converting the JSON + * data to protobuf messages, then using StreamWriter's append() to write the data at current end + * of stream. If there is a schema update, the current StreamWriter is closed. A new StreamWriter + * is created with the updated TableSchema. + * + * @param jsonArr The JSON array that contains JSONObjects to be written + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture + */ + public ApiFuture append(JSONArray jsonArr) + throws IOException, Descriptors.DescriptorValidationException { + return this.schemaAwareStreamWriter.append(jsonArr); + } + + /** + * Writes a JSONArray that contains JSONObjects to the BigQuery table by first converting the JSON + * data to protobuf messages, then using StreamWriter's append() to write the data at the + * specified offset. If there is a schema update, the current StreamWriter is closed. A new + * StreamWriter is created with the updated TableSchema. + * + * @param jsonArr The JSON array that contains JSONObjects to be written + * @param offset Offset for deduplication + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture + */ + public ApiFuture append(JSONArray jsonArr, long offset) + throws IOException, Descriptors.DescriptorValidationException { + return this.schemaAwareStreamWriter.append(jsonArr, offset); + } + + private JSONArray gsonToOrgJSON(JsonArray jsonArr) { + return new JSONArray(jsonArr.toString()); + } + + /** + * Writes a JsonArray that contains JsonObjects to the BigQuery table by first converting the JSON + * data to protobuf messages, then using StreamWriter's append() to write the data at current end + * of stream. If there is a schema update, the current StreamWriter is closed. A new StreamWriter + * is created with the updated TableSchema. + * + * @param jsonArr The JSON array that contains JsonObjects to be written + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture + */ + public ApiFuture append(JsonArray jsonArr) + throws IOException, Descriptors.DescriptorValidationException { + return this.append(jsonArr, -1); + } + + /** + * Writes a JsonArray that contains JsonObjects to the BigQuery table by first converting the JSON + * data to protobuf messages, then using StreamWriter's append() to write the data at the + * specified offset. If there is a schema update, the current StreamWriter is closed. A new + * StreamWriter is created with the updated TableSchema. 
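+ * <p>Illustrative example: {@code writer.append(rows, 100L)} asks the backend to write these
+ * rows at offset 100; if data was already written at that offset, the returned future fails
+ * with {@code Exceptions.OffsetAlreadyExists}.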
+   *
+   * @param jsonArr The JSON array that contains JSONObjects to be written
+   * @param offset Offset for deduplication
+   * @return {@code ApiFuture<AppendRowsResponse>} returns an AppendRowsResponse message wrapped in
+   *     an ApiFuture
+   */
+  public ApiFuture<AppendRowsResponse> append(JsonArray jsonArr, long offset)
+      throws IOException, Descriptors.DescriptorValidationException {
+    return this.append(gsonToOrgJSON(jsonArr), offset);
+  }
+
+  public String getStreamName() {
+    return this.schemaAwareStreamWriter.getStreamName();
+  }
+
+  /**
+   * @return A unique Id for this writer.
+   */
+  public String getWriterId() {
+    return this.schemaAwareStreamWriter.getWriterId();
+  }
+
+  /**
+   * Gets current descriptor
+   *
+   * @return Descriptor
+   */
+  public Descriptors.Descriptor getDescriptor() {
+    return this.schemaAwareStreamWriter.getDescriptor();
+  }
+
+  /**
+   * Gets the location of the destination
+   *
+   * @return the location of the destination
+   */
+  public String getLocation() {
+    return this.schemaAwareStreamWriter.getLocation();
+  }
+
+  /**
+   * Returns the wait time of a request on the client side before it is sent to the server. A
+   * request can wait on the client side when it has reached the client-side inflight request limit
+   * (adjustable when constructing the Writer). The value is the wait time for the last sent
+   * request. A constantly high wait value indicates a need for more throughput; you can create a
+   * new Stream to increase throughput in the exclusive stream case, or create a new Writer in the
+   * default stream case.
+   */
+  public long getInflightWaitSeconds() {
+    return this.schemaAwareStreamWriter.getInflightWaitSeconds();
+  }
+
+  /**
+   * @return the missing value interpretation map used for the writer.
+   */
+  public Map<String, AppendRowsRequest.MissingValueInterpretation>
+      getMissingValueInterpretationMap() {
+    return this.schemaAwareStreamWriter.getMissingValueInterpretationMap();
+  }
+
+  /**
+   * newBuilder that constructs a JsonStreamWriter builder with BigQuery client being initialized by
+   * StreamWriter by default.
+   *

+   * <p>The table schema passed in will be updated automatically when there is a schema update
+   * event. When used for Writer creation, it should be the latest schema. So when you are trying to
+   * reuse a stream, you should use Builder newBuilder(String streamOrTableName,
+   * BigQueryWriteClient client) instead, so the created Writer will be based on a fresh schema.
+   *
+   * @param streamOrTableName name of the stream that must follow
+   *     "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" or table name
+   *     "projects/[^/]+/datasets/[^/]+/tables/[^/]+"
+   * @param tableSchema The schema of the table when the stream was created, which is passed back
+   *     through {@code WriteStream}
+   * @return Builder
+   */
+  public static Builder newBuilder(String streamOrTableName, TableSchema tableSchema) {
+    return new Builder(
+        SchemaAwareStreamWriter.newBuilder(
+            streamOrTableName, tableSchema, JsonToProtoMessage.INSTANCE));
+  }
+
+  /**
+   * newBuilder that constructs a JsonStreamWriter builder.
+   *

+   * <p>The table schema passed in will be updated automatically when there is a schema update
+   * event. When used for Writer creation, it should be the latest schema. So when you are trying to
+   * reuse a stream, you should use Builder newBuilder(String streamOrTableName,
+   * BigQueryWriteClient client) instead, so the created Writer will be based on a fresh schema.
+   *
+   * @param streamOrTableName name of the stream that must follow
+   *     "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+"
+   * @param tableSchema The schema of the table when the stream was created, which is passed back
+   *     through {@code WriteStream}
+   * @param client BigQueryWriteClient
+   * @return Builder
+   */
+  public static Builder newBuilder(
+      String streamOrTableName, TableSchema tableSchema, BigQueryWriteClient client) {
+    return new Builder(
+        SchemaAwareStreamWriter.newBuilder(
+            streamOrTableName, tableSchema, client, JsonToProtoMessage.INSTANCE));
+  }
+
+  /**
+   * newBuilder that constructs a JsonStreamWriter builder with TableSchema being initialized by
+   * StreamWriter by default.
+   *
+   * @param streamOrTableName name of the stream that must follow
+   *     "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+"
+   * @param client BigQueryWriteClient
+   * @return Builder
+   */
+  public static Builder newBuilder(String streamOrTableName, BigQueryWriteClient client) {
+    return new Builder(
+        SchemaAwareStreamWriter.newBuilder(streamOrTableName, client, JsonToProtoMessage.INSTANCE));
+  }
+
+  /**
+   * Sets the maximum time a request is allowed to wait in the request queue. In rare cases, an
+   * append request can wait indefinitely for its callback when the Google networking SDK does not
+   * detect a network breakage. The default timeout is 15 minutes. We are investigating the root
+   * cause for the callback not being triggered by the networking SDK.
+   */
+  public static void setMaxRequestCallbackWaitTime(Duration waitTime) {
+    ConnectionWorker.MAXIMUM_REQUEST_CALLBACK_WAIT_TIME = waitTime;
+  }
+
+  @Override
+  public void close() {
+    this.schemaAwareStreamWriter.close();
+  }
+
+  /**
+   * @return whether the JSON writer can no longer be used for writing, either because the
+   *     JsonStreamWriter was explicitly closed or because the underlying connection broke while
+   *     the connection pool is not used. The client should recreate the JsonStreamWriter in this
+   *     case.
+   */
+  public boolean isClosed() {
+    return this.schemaAwareStreamWriter.isClosed();
+  }
+
+  /**
+   * @return if user explicitly closed the writer.
+   */
+  public boolean isUserClosed() {
+    return this.schemaAwareStreamWriter.isUserClosed();
+  }
+
+  public static final class Builder {
+    private final SchemaAwareStreamWriter.Builder schemaAwareStreamWriterBuilder;
+
+    private Builder(SchemaAwareStreamWriter.Builder schemaAwareStreamWriterBuilder) {
+      this.schemaAwareStreamWriterBuilder = schemaAwareStreamWriterBuilder.setClientId(CLIENT_ID);
+    }
+
+    /**
+     * Setter for the underlying StreamWriter's TransportChannelProvider.
+     *
+     * @param channelProvider
+     * @return Builder
+     */
+    public Builder setChannelProvider(TransportChannelProvider channelProvider) {
+      this.schemaAwareStreamWriterBuilder.setChannelProvider(channelProvider);
+      return this;
+    }
+
+    /**
+     * Setter for the underlying StreamWriter's CredentialsProvider.
+     *
+     * @param credentialsProvider
+     * @return Builder
+     */
+    public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) {
+      this.schemaAwareStreamWriterBuilder.setCredentialsProvider(credentialsProvider);
+      return this;
+    }
+
+    /**
+     * Setter for the underlying StreamWriter's ExecutorProvider.
+     *
+     * @param executorProvider
+     * @return Builder
+     */
+    public Builder setExecutorProvider(ExecutorProvider executorProvider) {
+      this.schemaAwareStreamWriterBuilder.setExecutorProvider(executorProvider);
+      return this;
+    }
+
+    /**
+     * Setter for the underlying StreamWriter's FlowControlSettings.
+     *
+     * @param flowControlSettings
+     * @return Builder
+     */
+    public Builder setFlowControlSettings(FlowControlSettings flowControlSettings) {
+      this.schemaAwareStreamWriterBuilder.setFlowControlSettings(flowControlSettings);
+      return this;
+    }
+
+    /**
+     * Returns the stream name on the builder.
+     *
+     * @return the stream name
+     */
+    public String getStreamName() {
+      return this.schemaAwareStreamWriterBuilder.getStreamName();
+    }
+
+    /**
+     * Setter for the underlying StreamWriter's Endpoint.
+     *
+     * @param endpoint
+     * @return Builder
+     */
+    public Builder setEndpoint(String endpoint) {
+      this.schemaAwareStreamWriterBuilder.setEndpoint(endpoint);
+      return this;
+    }
+
+    /**
+     * Setter for a traceId to help identify traffic origin.
+     *
+     * @param traceId
+     * @return Builder
+     */
+    public Builder setTraceId(String traceId) {
+      this.schemaAwareStreamWriterBuilder.setTraceId(traceId);
+      return this;
+    }
+
+    /**
+     * Setter for ignoreUnknownFields; if true, JSON fields unknown to BigQuery will be ignored
+     * instead of erroring out.
+     *
+     * @param ignoreUnknownFields
+     * @return Builder
+     */
+    public Builder setIgnoreUnknownFields(boolean ignoreUnknownFields) {
+      this.schemaAwareStreamWriterBuilder.setIgnoreUnknownFields(ignoreUnknownFields);
+      return this;
+    }
+
+    /** This parameter is not used. It will be removed soon. */
+    public Builder setReconnectAfter10M(boolean reconnectAfter10M) {
+      return this;
+    }
+
+    /**
+     * Enables a static shared bidi-streaming connection pool that dynamically scales up
+     * connections based on the backlog within each individual connection. A single table's traffic
+     * might be split across multiple connections if needed. Different tables' traffic can also be
+     * multiplexed within the same connection.
+     *
+     * <pre>
+     * Each connection pool has an upper limit (default 20) and a lower limit (default
+     * 2) for the number of active connections. This parameter can be tuned via a static method
+     * exposed on {@link ConnectionWorkerPool}.
+     *
+     * Example:
+     * ConnectionWorkerPool.setOptions(
+     *     Settings.builder().setMinConnectionsPerRegion(4).setMaxConnectionsPerRegion(10).build());
+     *
+     * </pre>
+     *
+     * @param enableConnectionPool
+     * @return Builder
+     */
+    public Builder setEnableConnectionPool(boolean enableConnectionPool) {
+      this.schemaAwareStreamWriterBuilder.setEnableConnectionPool(enableConnectionPool);
+      return this;
+    }
+
+    /**
+     * Location of the table this stream writer is targeting. Connection pools are shared by
+     * location.
+     *
+     * @param location
+     * @return Builder
+     */
+    public Builder setLocation(String location) {
+      this.schemaAwareStreamWriterBuilder.setLocation(location);
+      return this;
+    }
+
+    /**
+     * Sets the compression to use for the calls. The compressor must be of type gzip.
+     *
+     * @param compressorName
+     * @return Builder
+     */
+    public Builder setCompressorName(String compressorName) {
+      this.schemaAwareStreamWriterBuilder.setCompressorName(compressorName);
+      return this;
+    }
+
+    /**
+     * Enable client lib automatic retries on request level errors.
+     *
+     * <pre>
+     * Immediate Retry code:
+     * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+     * Backoff Retry code:
+     * RESOURCE_EXHAUSTED
+     *
+     * Example:
+     * RetrySettings retrySettings = RetrySettings.newBuilder()
+     *      .setInitialRetryDelay(Duration.ofMillis(500)) // applies to backoff retry
+     *      .setRetryDelayMultiplier(1.1) // applies to backoff retry
+     *      .setMaxAttempts(5) // applies to both retries
+     *      .setMaxRetryDelay(Duration.ofMinutes(1)) // applies to backoff retry
+     *      .build();
+     * </pre>
+     *
+     * @param retrySettings
+     * @return Builder
+     */
+    public Builder setRetrySettings(RetrySettings retrySettings) {
+      this.schemaAwareStreamWriterBuilder.setRetrySettings(retrySettings);
+      return this;
+    }
+
+    /**
+     * Enables a latency profiler that periodically generates a detailed latency report for the
+     * top-latency requests. This is currently an experimental API.
+     */
+    public Builder setEnableLatencyProfiler(boolean enableLatencyProfiler) {
+      this.schemaAwareStreamWriterBuilder.setEnableLatencyProfiler(enableLatencyProfiler);
+      return this;
+    }
+
+    /** Enable generation of metrics for OpenTelemetry. */
+    public Builder setEnableOpenTelemetry(boolean enableOpenTelemetry) {
+      this.schemaAwareStreamWriterBuilder.setEnableOpenTelemetry(enableOpenTelemetry);
+      return this;
+    }
+
+    /**
+     * Sets the default missing value interpretation if the column is not present in the
+     * missing_value_interpretations map.
+     *

+     * <p>If this value is set to `DEFAULT_VALUE`, the default value will be populated when the
+     * field is missing from the JSON and a default value is defined for the column.
+     *

+     * <p>If this value is set to `NULL_VALUE`, the default value will never be populated.
+     */
+    public Builder setDefaultMissingValueInterpretation(
+        AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation) {
+      this.schemaAwareStreamWriterBuilder.setDefaultMissingValueInterpretation(
+          defaultMissingValueInterpretation);
+      return this;
+    }
+
+    /**
+     * Sets the missing value interpretation map for the JsonStreamWriter. The input
+     * missingValueInterpretationMap is used for all append requests unless otherwise changed.
+     *
+     * @param missingValueInterpretationMap the missing value interpretation map used by the
+     *     JsonStreamWriter.
+     * @return Builder
+     */
+    public Builder setMissingValueInterpretationMap(
+        Map<String, AppendRowsRequest.MissingValueInterpretation> missingValueInterpretationMap) {
+      this.schemaAwareStreamWriterBuilder.setMissingValueInterpretationMap(
+          missingValueInterpretationMap);
+      return this;
+    }
+
+    /**
+     * Builds JsonStreamWriter
+     *
+     * @return JsonStreamWriter
+     */
+    public JsonStreamWriter build()
+        throws Descriptors.DescriptorValidationException,
+            IllegalArgumentException,
+            IOException,
+            InterruptedException {
+      return new JsonStreamWriter(this.schemaAwareStreamWriterBuilder);
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java
new file mode 100644
index 000000000000..582795d27cca
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java
@@ -0,0 +1,1146 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static java.time.temporal.ChronoField.HOUR_OF_DAY; +import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; +import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; + +import com.google.api.pathtemplate.ValidationException; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.primitives.Doubles; +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.FieldDescriptor; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Timestamp; +import com.google.protobuf.UninitializedMessageException; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; +import java.time.format.TextStyle; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * Converts JSON data to Protobuf messages given the Protobuf descriptor and BigQuery table schema. + * The Protobuf descriptor must have all fields lowercased. 
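+ *
+ * <p>A minimal conversion sketch (descriptor and tableSchema are assumed to describe the same
+ * table, e.g. a descriptor generated from the table schema):
+ *
+ * <pre>
+ * JSONObject row = new JSONObject().put("name", "Alice");
+ * DynamicMessage msg =
+ *     JsonToProtoMessage.INSTANCE.convertToProtoMessage(descriptor, tableSchema, row);
+ * </pre>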
+ */ +public class JsonToProtoMessage implements ToProtoConverter { + public static final JsonToProtoMessage INSTANCE = new JsonToProtoMessage(); + private static final int NUMERIC_SCALE = 9; + private static final ImmutableMap FIELD_TYPE_TO_DEBUG_MESSAGE = + new ImmutableMap.Builder() + .put(FieldDescriptor.Type.BOOL, "boolean") + .put(FieldDescriptor.Type.BYTES, "bytes") + .put(FieldDescriptor.Type.INT32, "int32") + .put(FieldDescriptor.Type.DOUBLE, "double") + .put(FieldDescriptor.Type.INT64, "int64") + .put(FieldDescriptor.Type.STRING, "string") + .put(FieldDescriptor.Type.MESSAGE, "object") + .build(); + + private static final DateTimeFormatter TO_TIMESTAMP_FORMATTER = + new DateTimeFormatterBuilder() + .parseLenient() + .append(DateTimeFormatter.ISO_LOCAL_DATE) + .optionalStart() + .appendLiteral('T') + .optionalEnd() + .appendValue(HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2) + .optionalEnd() + .optionalStart() + .appendFraction(NANO_OF_SECOND, 6, 9, true) + .optionalEnd() + .optionalStart() + .appendOffset("+HHMM", "+00:00") + .optionalEnd() + .toFormatter() + .withZone(ZoneOffset.UTC); + + private static final DateTimeFormatter FROM_TIMESTAMP_FORMATTER = + new DateTimeFormatterBuilder() + .parseLenient() + .append(DateTimeFormatter.ofPattern("yyyy[/][-]MM[/][-]dd")) + .optionalStart() + .appendLiteral('T') + .optionalEnd() + .optionalStart() + .appendLiteral(' ') + .optionalEnd() + .appendValue(ChronoField.HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(ChronoField.MINUTE_OF_HOUR, 2) + .optionalStart() + .appendLiteral(':') + .appendValue(ChronoField.SECOND_OF_MINUTE, 2) + .optionalEnd() + .optionalStart() + .appendValue(ChronoField.MILLI_OF_SECOND, 3) + .optionalEnd() + .optionalStart() + .appendFraction(ChronoField.MICRO_OF_SECOND, 3, 6, true) + .optionalEnd() + .optionalStart() + .appendFraction(ChronoField.NANO_OF_SECOND, 6, 9, true) + .optionalEnd() + .optionalStart() + .appendLiteral(' ') + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalStart() + .appendZoneText(TextStyle.SHORT) + .optionalEnd() + .toFormatter() + .withZone(ZoneOffset.UTC); + + private static final DateTimeFormatter DATETIME_FORMATTER = + new DateTimeFormatterBuilder() + .parseLenient() + .append(DateTimeFormatter.ISO_LOCAL_DATE) + .optionalStart() + .optionalStart() + .parseCaseInsensitive() + .appendLiteral('T') + .optionalEnd() + .optionalStart() + .appendLiteral(' ') + .optionalEnd() + .append(DateTimeFormatter.ISO_LOCAL_TIME) + .optionalEnd() + .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .toFormatter(); + + // Regex to identify >9 digits in the fraction part (e.g. 
`.123456789123`) + // Matches the dot, followed by 10+ digits (fractional part), followed by non-digits (like `+00`) + // or end of string + private static final Pattern ISO8601_TIMESTAMP_HIGH_PRECISION_PATTERN = + Pattern.compile("\\.(\\d{10,})(?:\\D|$)"); + private static final long MICROS_PER_SECOND = 1_000_000; + private static final int NANOS_PER_MICRO = 1_000; + + /** You can use {@link #INSTANCE} instead */ + public JsonToProtoMessage() {} + + public static DynamicMessage convertJsonToProtoMessage( + Descriptor protoSchema, + TableSchema tableSchema, + JSONObject json, + boolean ignoreUnknownFields) { + return INSTANCE.convertToProtoMessage(protoSchema, tableSchema, json, ignoreUnknownFields); + } + + public static DynamicMessage convertJsonToProtoMessage(Descriptor protoSchema, JSONObject json) { + return INSTANCE.convertToProtoMessage(protoSchema, json); + } + + public static DynamicMessage convertJsonToProtoMessage( + Descriptor protoSchema, TableSchema tableSchema, JSONObject json) { + return INSTANCE.convertToProtoMessage(protoSchema, tableSchema, json); + } + + /** + * Converts input message to Protobuf. + * + *

+   * <p>WARNING: it's much more efficient to call the other APIs accepting a JSON array if the
+   * JSON rows share the same table schema.
+   *
+   * @param protoSchema the schema of the output Protobuf message.
+   * @param tableSchema the underlying table schema for which the Protobuf is being built.
+   * @param json the input JSON object to be converted to Protobuf.
+   * @param ignoreUnknownFields flag indicating that additional fields not present in the output
+   *     schema should be accepted.
+   * @return Converted message in Protobuf format.
+   */
+  public DynamicMessage convertToProtoMessage(
+      Descriptor protoSchema, TableSchema tableSchema, Object json, boolean ignoreUnknownFields) {
+    return convertToProtoMessage(protoSchema, tableSchema, (JSONObject) json, ignoreUnknownFields);
+  }
+
+  /**
+   * Converts a Json array to a list of Protobuf messages
+   *
+   * @param protoSchema the schema of the output Protobuf message.
+   * @param tableSchema the underlying table schema for which the Protobuf is being built.
+   * @param jsonArray the input JSON array to be converted to Protobuf.
+   * @param ignoreUnknownFields flag indicating that additional fields not present in the output
+   *     schema should be accepted.
+   * @return Converted messages in Protobuf format.
+   */
+  @Override
+  public List convertToProtoMessage(
+      Descriptor protoSchema,
+      TableSchema tableSchema,
+      Iterable jsonArray,
+      boolean ignoreUnknownFields) {
+    return convertToProtoMessage(
+        protoSchema, tableSchema, (JSONArray) jsonArray, ignoreUnknownFields);
+  }
+
+  /**
+   * Converts Json data to protocol buffer messages given the protocol buffer descriptor.
+   *

+   * <p>WARNING: it's much more efficient to call the other APIs accepting a JSON array if the
+   * JSON rows share the same table schema.
+   *
+   * @param protoSchema
+   * @param json
+   * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor.
+   */
+  public DynamicMessage convertToProtoMessage(Descriptor protoSchema, JSONObject json)
+      throws IllegalArgumentException {
+    Preconditions.checkNotNull(json, "JSONObject is null.");
+    Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null.");
+    Preconditions.checkState(json.length() != 0, "JSONObject is empty.");
+
+    return convertToProtoMessage(protoSchema, null, json, "root", false);
+  }
+
+  /**
+   * Converts Json data to protocol buffer messages given the protocol buffer descriptor.
+   *

+   * <p>WARNING: it's much more efficient to call the other APIs accepting a JSON array if the
+   * JSON rows share the same table schema.
+   *
+   * @param protoSchema
+   * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME,
+   *     NUMERIC, BIGNUMERIC
+   * @param json
+   * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor.
+   */
+  public DynamicMessage convertToProtoMessage(
+      Descriptor protoSchema, TableSchema tableSchema, JSONObject json)
+      throws IllegalArgumentException {
+    Preconditions.checkNotNull(json, "JSONObject is null.");
+    Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null.");
+    Preconditions.checkNotNull(tableSchema, "TableSchema is null.");
+    Preconditions.checkState(json.length() != 0, "JSONObject is empty.");
+
+    return convertToProtoMessage(protoSchema, tableSchema.getFieldsList(), json, "root", false);
+  }
+
+  /**
+   * Converts Json data to protocol buffer messages given the protocol buffer descriptor.
+   *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * + * @param protoSchema + * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, + * NUMERIC, BIGNUMERIC + * @param json + * @param ignoreUnknownFields allows unknown fields in JSON input to be ignored. + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + public DynamicMessage convertToProtoMessage( + Descriptor protoSchema, TableSchema tableSchema, JSONObject json, boolean ignoreUnknownFields) + throws IllegalArgumentException { + Preconditions.checkNotNull(json, "JSONObject is null."); + Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); + Preconditions.checkNotNull(tableSchema, "TableSchema is null."); + Preconditions.checkState(json.length() != 0, "JSONObject is empty."); + return convertToProtoMessage( + protoSchema, tableSchema.getFieldsList(), json, "root", ignoreUnknownFields); + } + + /** + * Converts Json array to list of protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, + * NUMERIC, BIGNUMERIC + * @param jsonArray + * @param ignoreUnknownFields allows unknown fields in JSON input to be ignored. + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + public List convertToProtoMessage( + Descriptor protoSchema, + TableSchema tableSchema, + JSONArray jsonArray, + boolean ignoreUnknownFields) + throws IllegalArgumentException { + Preconditions.checkNotNull(jsonArray, "jsonArray is null."); + Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); + Preconditions.checkNotNull(tableSchema, "tableSchema is null."); + Preconditions.checkState(jsonArray.length() != 0, "jsonArray is empty."); + + return convertToProtoMessage( + protoSchema, tableSchema.getFieldsList(), jsonArray, "root", ignoreUnknownFields); + } + + private DynamicMessage convertToProtoMessage( + Descriptor protoSchema, + List tableSchema, + JSONObject jsonObject, + String jsonScope, + boolean ignoreUnknownFields) { + JSONArray jsonArray = new JSONArray(); + jsonArray.put(jsonObject); + return convertToProtoMessage( + protoSchema, tableSchema, jsonArray, jsonScope, ignoreUnknownFields) + .get(0); + } + + /** + * Converts Json data to protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param json + * @param jsonScope Debugging purposes + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + private List convertToProtoMessage( + Descriptor protoSchema, + List tableSchema, + JSONArray jsonArray, + String jsonScope, + boolean ignoreUnknownFields) + throws RowIndexToErrorException { + List messageList = new ArrayList<>(); + Map jsonNameToMetadata = new HashMap<>(); + Map rowIndexToErrorMessage = new HashMap<>(); + + boolean hasDataUnknownError = false; + for (int i = 0; i < jsonArray.length(); i++) { + try { + DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema); + JSONObject jsonObject = jsonArray.getJSONObject(i); + String[] jsonNames = JSONObject.getNames(jsonObject); + if (jsonNames == null) { + messageList.add(protoMsg.build()); + continue; + } + for (String jsonName : jsonNames) { + String currentScope = jsonScope + "." 
+ jsonName; + FieldDescriptorAndFieldTableSchema fieldDescriptorAndFieldTableSchema = + jsonNameToMetadata.computeIfAbsent( + currentScope, + k -> { + return computeDescriptorAndSchema( + currentScope, ignoreUnknownFields, jsonName, protoSchema, tableSchema); + }); + if (fieldDescriptorAndFieldTableSchema == null) { + continue; + } + FieldDescriptor field = fieldDescriptorAndFieldTableSchema.fieldDescriptor; + TableFieldSchema tableFieldSchema = fieldDescriptorAndFieldTableSchema.tableFieldSchema; + try { + if (!field.isRepeated()) { + fillField( + protoMsg, + field, + tableFieldSchema, + jsonObject, + jsonName, + currentScope, + ignoreUnknownFields); + } else { + fillRepeatedField( + protoMsg, + field, + tableFieldSchema, + jsonObject, + jsonName, + currentScope, + ignoreUnknownFields); + } + } catch (Exceptions.FieldParseError ex) { + throw ex; + } catch (Exception ex) { + // This function is recursively called, so this throw will be caught and throw directly + // out by the catch above. + throw new Exceptions.FieldParseError( + currentScope, + tableFieldSchema != null + ? tableFieldSchema.getType().name() + : field.getType().name(), + ex); + } + } + DynamicMessage msg; + try { + msg = protoMsg.build(); + } catch (UninitializedMessageException e) { + String errorMsg = e.getMessage(); + int idxOfColon = errorMsg.indexOf(":"); + String missingFieldName = errorMsg.substring(idxOfColon + 2); + throw new IllegalArgumentException( + String.format( + "JSONObject does not have the required field %s.%s.", + jsonScope, missingFieldName)); + } + messageList.add(msg); + } catch (IllegalArgumentException exception) { + if (exception instanceof Exceptions.DataHasUnknownFieldException) { + hasDataUnknownError = true; + } + if (exception instanceof Exceptions.FieldParseError) { + Exceptions.FieldParseError ex = (Exceptions.FieldParseError) exception; + rowIndexToErrorMessage.put( + i, + "Field " + + ex.getFieldName() + + " failed to convert to " + + ex.getBqType() + + ". Error: " + + ex.getCause().getMessage()); + } else { + rowIndexToErrorMessage.put(i, exception.getMessage()); + } + } + } + if (!rowIndexToErrorMessage.isEmpty()) { + throw new RowIndexToErrorException(rowIndexToErrorMessage, hasDataUnknownError); + } + return messageList; + } + + private static final class FieldDescriptorAndFieldTableSchema { + TableFieldSchema tableFieldSchema; + + // Field descriptor + FieldDescriptor fieldDescriptor; + } + + private FieldDescriptorAndFieldTableSchema computeDescriptorAndSchema( + String currentScope, + boolean ignoreUnknownFields, + String jsonName, + Descriptor protoSchema, + List tableFieldSchemaList) { + + // We want lowercase here to support case-insensitive data writes. + // The protobuf descriptor that is used is assumed to have all lowercased fields + String jsonFieldLocator = jsonName.toLowerCase(); + + // If jsonName is not compatible with proto naming convention, we should look by its + // placeholder name. + if (!BigQuerySchemaUtil.isProtoCompatible(jsonFieldLocator)) { + jsonFieldLocator = BigQuerySchemaUtil.generatePlaceholderFieldName(jsonFieldLocator); + } + + FieldDescriptor field = protoSchema.findFieldByName(jsonFieldLocator); + if (field == null && !ignoreUnknownFields) { + throw new Exceptions.DataHasUnknownFieldException(currentScope); + } else if (field == null) { + return null; + } + TableFieldSchema fieldSchema = null; + if (tableFieldSchemaList != null) { + // protoSchema is generated from tableSchema so their field ordering should match. 
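+        // (FieldDescriptor.getIndex() is the zero-based position of the field within the proto
+        // message, which lines up with the position of the corresponding column in
+        // tableFieldSchemaList.)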
+ fieldSchema = tableFieldSchemaList.get(field.getIndex()); + // For RANGE type, expliclitly add the fields start and end of the same FieldElementType as it + // is not expliclity defined in the TableFieldSchema. + if (fieldSchema.getType() == TableFieldSchema.Type.RANGE) { + switch (fieldSchema.getRangeElementType().getType()) { + case DATE: + case DATETIME: + case TIMESTAMP: + fieldSchema = + fieldSchema.toBuilder() + .addFields( + TableFieldSchema.newBuilder() + .setName("start") + .setType(fieldSchema.getRangeElementType().getType()) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("end") + .setType(fieldSchema.getRangeElementType().getType()) + .build()) + .build(); + break; + default: + throw new ValidationException( + "Field at index " + + field.getIndex() + + " with name (" + + fieldSchema.getName() + + ") with type (RANGE) has an unsupported range element type (" + + fieldSchema.getRangeElementType() + + ")"); + } + } + + if (!fieldSchema.getName().toLowerCase().equals(BigQuerySchemaUtil.getFieldName(field))) { + throw new ValidationException( + "Field at index " + + field.getIndex() + + " has mismatch names (" + + fieldSchema.getName() + + ") (" + + field.getName() + + ")"); + } + } + FieldDescriptorAndFieldTableSchema fieldDescriptorAndFieldTableSchema = + new FieldDescriptorAndFieldTableSchema(); + fieldDescriptorAndFieldTableSchema.fieldDescriptor = field; + fieldDescriptorAndFieldTableSchema.tableFieldSchema = fieldSchema; + return fieldDescriptorAndFieldTableSchema; + } + + /** + * Fills a non-repetaed protoField with the json data. + * + * @param protoMsg The protocol buffer message being constructed + * @param fieldDescriptor Proto format to be transmitted over the wire (derived from table schema + * via BQTableSchemaToProtoDescriptor.BQTableSchemaModeMap) + * @param fieldSchema Actual table column schema type if available + * @param json + * @param exactJsonKeyName Exact key name in JSONObject instead of lowercased version + * @param currentScope Debugging purposes + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + private void fillField( + DynamicMessage.Builder protoMsg, + FieldDescriptor fieldDescriptor, + TableFieldSchema fieldSchema, + JSONObject json, + String exactJsonKeyName, + String currentScope, + boolean ignoreUnknownFields) + throws IllegalArgumentException { + java.lang.Object val = json.get(exactJsonKeyName); + if (val == JSONObject.NULL) { + return; + } + switch (fieldDescriptor.getType()) { + case BOOL: + if (val instanceof Boolean) { + protoMsg.setField(fieldDescriptor, val); + return; + } + if (val instanceof String + && ("true".equalsIgnoreCase(((String) val)) + || "false".equalsIgnoreCase(((String) val)))) { + protoMsg.setField(fieldDescriptor, Boolean.parseBoolean((String) val)); + return; + } + break; + case BYTES: + if (fieldSchema != null) { + if (fieldSchema.getType() == TableFieldSchema.Type.NUMERIC) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal((String) val))); + return; + } else if (val instanceof Short || val instanceof Integer || val instanceof Long) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).longValue()))); + return; + } else if (val instanceof Float || val instanceof Double) { + // In JSON, the precision passed in is machine dependent. 
We should round the number + // before passing to backend. + BigDecimal bigDecimal = new BigDecimal(String.valueOf(val)); + if (bigDecimal.scale() > 9) { + bigDecimal = bigDecimal.setScale(NUMERIC_SCALE, RoundingMode.HALF_UP); + } + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal)); + return; + } else if (val instanceof BigDecimal) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString((BigDecimal) val)); + return; + } + } else if (fieldSchema.getType() == TableFieldSchema.Type.BIGNUMERIC) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal((String) val))); + return; + } else if (val instanceof Short || val instanceof Integer || val instanceof Long) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal(((Number) val).longValue()))); + return; + } else if (val instanceof Float || val instanceof Double) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal(String.valueOf(val)))); + return; + } else if (val instanceof BigDecimal) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString((BigDecimal) val)); + return; + } + } + } + if (val instanceof ByteString) { + protoMsg.setField(fieldDescriptor, ((ByteString) val).toByteArray()); + return; + } else if (val instanceof JSONArray) { + byte[] bytes = new byte[((JSONArray) val).length()]; + for (int j = 0; j < ((JSONArray) val).length(); j++) { + bytes[j] = (byte) ((JSONArray) val).getInt(j); + if (bytes[j] != ((JSONArray) val).getInt(j)) { + throw new IllegalArgumentException( + String.format( + "Error: " + currentScope + "[" + j + "] could not be converted to byte[].")); + } + } + protoMsg.setField(fieldDescriptor, bytes); + return; + } + break; + case INT64: + if (fieldSchema != null) { + if (fieldSchema.getType() == TableFieldSchema.Type.DATETIME) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.parse((String) val, DATETIME_FORMATTER))); + return; + } else if (val instanceof Long) { + protoMsg.setField(fieldDescriptor, val); + return; + } + } else if (fieldSchema.getType() == TableFieldSchema.Type.TIME) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime( + LocalTime.parse((String) val))); + return; + } else if (val instanceof Long) { + protoMsg.setField(fieldDescriptor, val); + return; + } + } else if (fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { + protoMsg.setField(fieldDescriptor, getTimestampAsLong(val)); + return; + } + } + if (val instanceof Integer) { + protoMsg.setField(fieldDescriptor, Long.valueOf((Integer) val)); + return; + } else if (val instanceof Long) { + protoMsg.setField(fieldDescriptor, val); + return; + } else if (val instanceof Byte) { + protoMsg.setField(fieldDescriptor, Long.valueOf((Byte) val)); + return; + } else if (val instanceof Short) { + protoMsg.setField(fieldDescriptor, Long.valueOf((Short) val)); + return; + } + if (val instanceof String) { + Long parsed = Longs.tryParse((String) val); + if (parsed != null) { + protoMsg.setField(fieldDescriptor, parsed); + return; + } + } + break; + case INT32: + if (fieldSchema != null && fieldSchema.getType() == 
TableFieldSchema.Type.DATE) { + if (val instanceof String) { + protoMsg.setField(fieldDescriptor, (int) LocalDate.parse((String) val).toEpochDay()); + return; + } else if (val instanceof Integer || val instanceof Long) { + protoMsg.setField(fieldDescriptor, ((Number) val).intValue()); + return; + } + } + if (val instanceof Integer) { + protoMsg.setField(fieldDescriptor, val); + return; + } + if (val instanceof String) { + Integer parsed = Ints.tryParse((String) val); + if (parsed != null) { + protoMsg.setField(fieldDescriptor, parsed); + return; + } + } + break; + case STRING: + // Timestamp fields will be transmitted as a String if BQ's timestamp field is + // enabled to support picosecond. Check that the schema's field is timestamp before + // proceeding with the rest of the logic. Converts the supported types into a String. + // Supported types: https://docs.cloud.google.com/bigquery/docs/supported-data-types + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { + protoMsg.setField(fieldDescriptor, getTimestampAsString(val)); + return; + } + if (val instanceof String) { + protoMsg.setField(fieldDescriptor, val); + return; + } else if (val instanceof Short + || val instanceof Integer + || val instanceof Long + || val instanceof Boolean) { + protoMsg.setField(fieldDescriptor, String.valueOf(val)); + return; + } + break; + case DOUBLE: + if (val instanceof Number) { + protoMsg.setField(fieldDescriptor, ((Number) val).doubleValue()); + return; + } + if (val instanceof String) { + Double parsed = Doubles.tryParse((String) val); + if (parsed != null) { + protoMsg.setField(fieldDescriptor, parsed); + return; + } + } + break; + case MESSAGE: + if (val instanceof JSONObject) { + protoMsg.setField( + fieldDescriptor, + convertToProtoMessage( + fieldDescriptor.getMessageType(), + fieldSchema == null ? null : fieldSchema.getFieldsList(), + json.getJSONObject(exactJsonKeyName), + currentScope, + ignoreUnknownFields)); + return; + } + break; + } + throw new IllegalArgumentException( + String.format( + "JSONObject does not have a %s field at %s.", + FIELD_TYPE_TO_DEBUG_MESSAGE.get(fieldDescriptor.getType()), currentScope)); + } + + /** + * Fills a repeated protoField with the json data. + * + * @param protoMsg The protocol buffer message being constructed + * @param fieldDescriptor Proto format to be transmitted over the wire (derived from table schema + * via BQTableSchemaToProtoDescriptor.BQTableSchemaModeMap) + * @param fieldSchema Actual table column schema type if available + * @param json If root level has no matching fields, throws exception. + * @param exactJsonKeyName Exact key name in JSONObject instead of lowercased version + * @param currentScope Debugging purposes + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + private void fillRepeatedField( + DynamicMessage.Builder protoMsg, + FieldDescriptor fieldDescriptor, + TableFieldSchema fieldSchema, + JSONObject json, + String exactJsonKeyName, + String currentScope, + boolean ignoreUnknownFields) + throws IllegalArgumentException { + + JSONArray jsonArray; + try { + jsonArray = json.getJSONArray(exactJsonKeyName); + } catch (JSONException e) { + java.lang.Object val = json.get(exactJsonKeyName); + // It is OK for repeated field to be null. 
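+      // For example, {"my_repeated_col": null} is treated the same as omitting the column: the
+      // repeated field is simply left empty.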
+ if (val == JSONObject.NULL) { + return; + } + throw new IllegalArgumentException( + "JSONObject does not have a array field at " + currentScope + "."); + } + java.lang.Object val; + int index; + for (int i = 0; i < jsonArray.length(); i++) { + val = jsonArray.get(i); + index = i; + switch (fieldDescriptor.getType()) { + case BOOL: + if (val instanceof Boolean) { + protoMsg.addRepeatedField(fieldDescriptor, val); + } else if (val instanceof String + && ("true".equalsIgnoreCase(((String) val)) + || "false".equalsIgnoreCase(((String) val)))) { + protoMsg.addRepeatedField(fieldDescriptor, Boolean.parseBoolean((String) val)); + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + break; + case BYTES: + boolean added = false; + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.NUMERIC) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal((String) val))); + added = true; + } else if (val instanceof Short || val instanceof Integer || val instanceof Long) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).longValue()))); + added = true; + } else if (val instanceof Float || val instanceof Double) { + BigDecimal bigDecimal = new BigDecimal(String.valueOf(val)); + if (bigDecimal.scale() > 9) { + bigDecimal = bigDecimal.setScale(NUMERIC_SCALE, RoundingMode.HALF_UP); + } + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal)); + added = true; + } else if (val instanceof BigDecimal) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString((BigDecimal) val)); + added = true; + } + } else if (fieldSchema != null + && fieldSchema.getType() == TableFieldSchema.Type.BIGNUMERIC) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal((String) val))); + added = true; + } else if (val instanceof Short || val instanceof Integer || val instanceof Long) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal(((Number) val).longValue()))); + added = true; + } else if (val instanceof Float || val instanceof Double) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal(String.valueOf(val)))); + added = true; + } else if (val instanceof BigDecimal) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToBigNumericByteString((BigDecimal) val)); + added = true; + } + } + if (!added) { + if (val instanceof ByteString) { + protoMsg.addRepeatedField(fieldDescriptor, ((ByteString) val).toByteArray()); + } else if (val instanceof byte[]) { + protoMsg.addRepeatedField(fieldDescriptor, val); + } else if (val instanceof JSONArray) { + try { + byte[] bytes = new byte[((JSONArray) val).length()]; + for (int j = 0; j < ((JSONArray) val).length(); j++) { + bytes[j] = (byte) ((JSONArray) val).getInt(j); + if (bytes[j] != ((JSONArray) val).getInt(j)) { + throw new IllegalArgumentException( + String.format( + "Error: " + + currentScope + + "[" + + index + + "] could not be converted to byte[].")); + } + } + protoMsg.addRepeatedField(fieldDescriptor, bytes); + } catch (JSONException e) { + throw new 
IllegalArgumentException( + String.format( + "Error: " + + currentScope + + "[" + + index + + "] could not be converted to byte[].")); + } + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + } + break; + case INT64: + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.DATETIME) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.parse((String) val, DATETIME_FORMATTER))); + } else if (val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, val); + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + } else if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIME) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime( + LocalTime.parse((String) val))); + } else if (val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, val); + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + } else if (fieldSchema != null + && fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { + protoMsg.addRepeatedField(fieldDescriptor, getTimestampAsLong(val)); + } else if (val instanceof Integer) { + protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Integer) val)); + } else if (val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, val); + } else if (val instanceof Byte) { + protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Byte) val)); + } else if (val instanceof Short) { + protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Short) val)); + } else if (val instanceof String) { + Long parsed = Longs.tryParse((String) val); + if (parsed != null) { + protoMsg.addRepeatedField(fieldDescriptor, parsed); + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + break; + case INT32: + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.DATE) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, (int) LocalDate.parse((String) val).toEpochDay()); + } else if (val instanceof Integer || val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, ((Number) val).intValue()); + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + } else if (val instanceof Integer) { + protoMsg.addRepeatedField(fieldDescriptor, val); + } else if (val instanceof String) { + Integer parsed = Ints.tryParse((String) val); + if (parsed != null) { + protoMsg.addRepeatedField(fieldDescriptor, parsed); + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + } else { + throwWrongFieldType(fieldDescriptor, currentScope, index); + } + break; + case STRING: + // Timestamp fields will be transmitted as a String if BQ's timestamp field is + // enabled to support picosecond. Check that the schema's field is timestamp before + // proceeding with the rest of the logic. Converts the supported types into a String. 
+          // Supported types: https://docs.cloud.google.com/bigquery/docs/supported-data-types
+          if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) {
+            protoMsg.addRepeatedField(fieldDescriptor, getTimestampAsString(val));
+            break; // continue with the next array element
+          }
+          if (val instanceof String) {
+            protoMsg.addRepeatedField(fieldDescriptor, val);
+          } else if (val instanceof Short
+              || val instanceof Integer
+              || val instanceof Long
+              || val instanceof Boolean) {
+            protoMsg.addRepeatedField(fieldDescriptor, String.valueOf(val));
+          } else {
+            throwWrongFieldType(fieldDescriptor, currentScope, index);
+          }
+          break;
+        case DOUBLE:
+          if (val instanceof Number) {
+            protoMsg.addRepeatedField(fieldDescriptor, ((Number) val).doubleValue());
+          } else if (val instanceof String) {
+            Double parsed = Doubles.tryParse((String) val);
+            if (parsed != null) {
+              protoMsg.addRepeatedField(fieldDescriptor, parsed);
+            } else {
+              throwWrongFieldType(fieldDescriptor, currentScope, index);
+            }
+          } else {
+            throwWrongFieldType(fieldDescriptor, currentScope, index);
+          }
+          break;
+        case MESSAGE:
+          if (val instanceof JSONObject) {
+            protoMsg.addRepeatedField(
+                fieldDescriptor,
+                convertToProtoMessage(
+                    fieldDescriptor.getMessageType(),
+                    fieldSchema == null ? null : fieldSchema.getFieldsList(),
+                    jsonArray.getJSONObject(i),
+                    currentScope,
+                    ignoreUnknownFields));
+          } else {
+            throwWrongFieldType(fieldDescriptor, currentScope, index);
+          }
+          break;
+      }
+    }
+  }
+
+  /**
+   * Converts microseconds from epoch to a Java Instant.
+   *
+   * @param micros the number of microseconds from 1970-01-01T00:00:00Z
+   * @return the Instant corresponding to the microseconds
+   */
+  @VisibleForTesting
+  static Instant fromEpochMicros(long micros) {
+    long seconds = Math.floorDiv(micros, MICROS_PER_SECOND);
+    int nanos = (int) Math.floorMod(micros, MICROS_PER_SECOND) * NANOS_PER_MICRO;
+
+    return Instant.ofEpochSecond(seconds, nanos);
+  }
+
+  /**
+   * Best effort to try and convert a timestamp to an ISO8601 string. Standardize the timestamp
+   * output to be ISO_DATE_TIME (e.g. 2011-12-03T10:15:30+01:00) for timestamps up to nanosecond
+   * precision. For higher precision, the ISO8601 input is used as long as it is valid.
+   */
+  @VisibleForTesting
+  static String getTimestampAsString(Object val) {
+    if (val instanceof String) {
+      String value = (String) val;
+      Double parsed = Doubles.tryParse(value);
+      // If true, it was a numeric value inside a String
+      if (parsed != null) {
+        return getTimestampAsString(parsed.longValue());
+      }
+      // Validate the ISO8601 values before sending it to the server.
+      validateTimestamp(value);
+
+      // If it's high precision (more than 9 digits), then return the ISO8601 string as-is
+      // as JDK does not have a DateTimeFormatter that supports more than nanosecond precision.
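+      // e.g. "2024-01-02T03:04:05.123456789012+00:00" (12 fractional digits) is returned
+      // unchanged after validation.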
+ Matcher matcher = ISO8601_TIMESTAMP_HIGH_PRECISION_PATTERN.matcher(value); + if (matcher.find()) { + return value; + } + // Otherwise, output the timestamp to a standard format before sending it to BQ + Instant instant = FROM_TIMESTAMP_FORMATTER.parse(value, Instant::from); + return TO_TIMESTAMP_FORMATTER.format(instant); + } else if (val instanceof Number) { + // Micros from epoch will most likely will be represented a Long, but any numeric + // value can be used + Instant instant = fromEpochMicros(((Number) val).longValue()); + return TO_TIMESTAMP_FORMATTER.format(instant); + } else if (val instanceof Timestamp) { + // Convert the Protobuf timestamp class to ISO8601 string + Timestamp timestamp = (Timestamp) val; + return TO_TIMESTAMP_FORMATTER.format( + Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos())); + } + throw new IllegalArgumentException("The timestamp value passed in is not from a valid type"); + } + + /* Best effort to try and convert the Object to a long (microseconds since epoch) */ + private long getTimestampAsLong(Object val) { + if (val instanceof String) { + Double parsed = Doubles.tryParse((String) val); + if (parsed != null) { + return parsed.longValue(); + } + TemporalAccessor parsedTime = FROM_TIMESTAMP_FORMATTER.parse((String) val); + return parsedTime.getLong(ChronoField.INSTANT_SECONDS) * 1000000 + + parsedTime.getLong(ChronoField.MICRO_OF_SECOND); + } else if (val instanceof Number) { + return ((Number) val).longValue(); + } + throw new IllegalArgumentException("The timestamp value passed in is not from a valid type"); + } + + private static void throwWrongFieldType( + FieldDescriptor fieldDescriptor, String currentScope, int index) { + throw new IllegalArgumentException( + String.format( + "JSONObject does not have a %s field at %s[%d].", + FIELD_TYPE_TO_DEBUG_MESSAGE.get(fieldDescriptor.getType()), currentScope, index)); + } + + /** + * Internal helper method to check that the timestamp follows the expected String input of ISO8601 + * string. Allows the fractional portion of the timestamp to support up to 12 digits of precision + * (up to picosecond). + * + * @throws IllegalArgumentException if timestamp is invalid or exceeds picosecond precision + */ + @VisibleForTesting + static void validateTimestamp(String timestamp) { + // Check if the string has greater than nanosecond precision (>9 digits in fractional second) + Matcher matcher = ISO8601_TIMESTAMP_HIGH_PRECISION_PATTERN.matcher(timestamp); + if (matcher.find()) { + // Group 1 is the fractional second part of the ISO8601 string + String fraction = matcher.group(1); + // Pos 10-12 of the fractional second are guaranteed to be digits. The regex only + // matches the fraction section as long as they are digits. + if (fraction.length() > 12) { + throw new IllegalArgumentException( + "Fractional second portion of ISO8601 only supports up to picosecond (12 digits) in" + + " BigQuery"); + } + + // Replace the entire fractional second portion with just the nanosecond portion. 
+ // The new timestamp will be validated against the JDK's DateTimeFormatter + String truncatedFraction = fraction.substring(0, 9); + timestamp = + new StringBuilder(timestamp) + .replace(matcher.start(1), matcher.end(1), truncatedFraction) + .toString(); + } + + // It is valid as long as DateTimeFormatter doesn't throw an exception + try { + FROM_TIMESTAMP_FORMATTER.parse((String) timestamp); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverter.java new file mode 100644 index 000000000000..4897e1a41953 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverter.java @@ -0,0 +1,129 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.protobuf.DescriptorProtos.DescriptorProto; +import com.google.protobuf.DescriptorProtos.EnumDescriptorProto; +import com.google.protobuf.DescriptorProtos.FieldDescriptorProto; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.FieldDescriptor; +import io.grpc.Status; +import java.util.HashSet; +import java.util.Set; + +// A Converter class that turns a native protobuf::DescriptorProto to a self contained +// protobuf::DescriptorProto +// that can be reconstructed by the backend. 
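+// For example, a field of message type foo.bar.Inner is flattened into a nested type named
+// "foo_bar_Inner" on the root DescriptorProto, and the field's type name is rewritten to point at
+// that nested type, so the resulting schema no longer depends on external .proto files.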
+public class ProtoSchemaConverter { + private static String getNameFromFullName(String fullName) { + return fullName.replace('.', '_'); + } + + private static ProtoSchema convertInternal( + Descriptor input, + Set visitedTypes, + Set enumTypes, + Set structTypes, + DescriptorProto.Builder rootProtoSchema) { + DescriptorProto.Builder resultProto = DescriptorProto.newBuilder(); + if (rootProtoSchema == null) { + rootProtoSchema = resultProto; + } + String protoFullName = input.getFullName(); + String protoName = getNameFromFullName(protoFullName); + resultProto.setName(protoName); + Set localEnumTypes = new HashSet(); + visitedTypes.add(input.getFullName()); + for (int i = 0; i < input.getFields().size(); i++) { + FieldDescriptor inputField = input.getFields().get(i); + FieldDescriptorProto.Builder resultField = inputField.toProto().toBuilder(); + if (inputField.getType() == FieldDescriptor.Type.GROUP + || inputField.getType() == FieldDescriptor.Type.MESSAGE) { + String msgFullName = inputField.getMessageType().getFullName(); + String msgName = getNameFromFullName(msgFullName); + if (structTypes.contains(msgFullName)) { + resultField.setTypeName(msgName); + } else { + if (visitedTypes.contains(msgFullName)) { + throw new InvalidArgumentException( + "Recursive type is not supported:" + inputField.getMessageType().getFullName(), + null, + GrpcStatusCode.of(Status.Code.INVALID_ARGUMENT), + false); + } + visitedTypes.add(msgFullName); + rootProtoSchema.addNestedType( + convertInternal( + inputField.getMessageType(), + visitedTypes, + enumTypes, + structTypes, + rootProtoSchema) + .getProtoDescriptor()); + visitedTypes.remove(msgFullName); + resultField.setTypeName( + rootProtoSchema.getNestedType(rootProtoSchema.getNestedTypeCount() - 1).getName()); + } + } + + if (inputField.getType() == FieldDescriptor.Type.ENUM) { + // For enums, in order to avoid value conflict, we will always define + // a enclosing struct called enum_full_name_E that includes the actual + // enum. + String enumFullName = inputField.getEnumType().getFullName(); + String enclosingTypeName = getNameFromFullName(enumFullName) + "_E"; + String enumName = inputField.getEnumType().getName(); + String actualEnumFullName = enclosingTypeName + "." + enumName; + if (enumTypes.contains(enumFullName)) { + resultField.setTypeName(actualEnumFullName); + } else { + EnumDescriptorProto enumType = inputField.getEnumType().toProto(); + resultProto.addNestedType( + DescriptorProto.newBuilder() + .setName(enclosingTypeName) + .addEnumType(enumType.toBuilder().setName(enumName)) + .build()); + resultField.setTypeName(actualEnumFullName); + enumTypes.add(enumFullName); + } + } + // The protobuf payload will be decoded as proto2 on the server side. The schema is also + // specified as proto2. Hence we must clear proto3-only features. This works since proto2 and + // proto3 are binary-compatible. 
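+      // For example, `optional string middle_name = 1;` in a proto3 file carries a
+      // proto3_optional marker and a synthetic oneof; both markers are dropped here so the field
+      // round-trips as a plain proto2 optional field.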
+      if (resultField.hasProto3Optional()) {
+        // Clear proto3-only features.
+        resultField.clearProto3Optional();
+      }
+      if (resultField.hasOneofIndex()) {
+        // Clear proto3-only features.
+        resultField.clearOneofIndex();
+      }
+      resultProto.addField(resultField);
+    }
+    structTypes.add(protoFullName);
+
+    return ProtoSchema.newBuilder().setProtoDescriptor(resultProto.build()).build();
+  }
+
+  public static ProtoSchema convert(Descriptor descriptor) {
+    Set<String> visitedTypes = new HashSet<>();
+    Set<String> enumTypes = new HashSet<>();
+    Set<String> structTypes = new HashSet<>();
+    return convertInternal(descriptor, visitedTypes, enumTypes, structTypes, null);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/RequestProfiler.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/RequestProfiler.java
new file mode 100644
index 000000000000..838935867558
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/RequestProfiler.java
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.logging.Logger;
+
+/**
+ * A profiler that periodically generates a report for the past period, with latency details for
+ * the slowest requests. This is used for debugging only. A request id is generated for each
+ * request at runtime, and selected parts of the code are wrapped with startOperation(...) and
+ * endOperation(...) to measure the latency of operations of an individual request.
+ *

+ * The report will contain the execution details of the TOP_K slowest requests, one example:
+ *
+ * <pre>
+ * INFO: During the last 60000 milliseconds at system time 1720825020138, in total 2 requests finished. Total dropped request is 0. The top 10 long latency requests details report:
+ * -----------------------------
+ * 	Request uuid: request_1 with total time 1000 milliseconds
+ * 		Operation name json_to_proto_conversion starts at: 1720566109971, ends at: 1720566109971, total time: 200 milliseconds
+ * 		Operation name backend_latency starts at: 1720566109971, ends at: 1720566109971, total time: 800 milliseconds
+ * -----------------------------
+ * 	Request uuid: request_2 with total time 500 milliseconds
+ * 		Operation name json_to_proto_conversion starts at: 1720566109971, ends at: 1720566109971, total time: 250 milliseconds
+ * 		Operation name backend_latency starts at: 1720566109971, ends at: 1720566109971, total time: 250 milliseconds
+ * ...
+ * </pre>
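+ *
+ * <p>A minimal, hypothetical enabling sketch (the writers normally wire this up themselves when
+ * request profiling is turned on through their builders):
+ *
+ * <pre>
+ * RequestProfiler.setTopKRequestsToLog(10);
+ * RequestProfiler.setReportPeriod(Duration.ofSeconds(30));
+ * // ... run traffic; a report like the one above is logged once per period ...
+ * RequestProfiler.disableAndResetProfiler();
+ * </pre>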
+ */
+public class RequestProfiler {
+  enum OperationName {
+    // The total end-to-end latency for a request.
+    TOTAL_LATENCY("append_request_total_latency"),
+    // Json to proto conversion time.
+    JSON_TO_PROTO_CONVERSION("json_to_proto_conversion"),
+    // Time spent within the wait queue before the request gets picked up.
+    WAIT_QUEUE("wait_queue"),
+    // Time spent during retry backoff.
+    RETRY_BACKOFF("retry_backoff"),
+    // Time spent within the backend plus the time spent over the network.
+    RESPONSE_LATENCY("response_latency"),
+    // Time spent waiting for the inflight quota to have vacancy.
+    WAIT_INFLIGHT_QUOTA("wait_inflight_quota");
+    private final String operationName;
+
+    OperationName(String operationName) {
+      this.operationName = operationName;
+    }
+  }
+
+  private static final Logger log = Logger.getLogger(RequestProfiler.class.getName());
+
+  // Discard the requests if we are caching too many requests.
+  private static final int MAX_CACHED_REQUEST = 100000;
+
+  // Singleton for easier access.
+  private static final RequestProfiler REQUEST_PROFILER_SINGLETON = new RequestProfiler();
+
+  // Tunable static variable indicating how many of the longest-latency requests to consider.
+  private static final int DEFAULT_TOP_K = 20;
+  private static int TOP_K = DEFAULT_TOP_K;
+
+  // Tunable static variable indicating how often the report should be generated.
+  private static final Duration DEFAULT_FLUSH_PERIOD = Duration.ofMinutes(1);
+  private static Duration FLUSH_PERIOD = DEFAULT_FLUSH_PERIOD;
+
+  // From request uuid to the profiler of the individual request. Cleaned up periodically.
+  private final Map<String, IndividualRequestProfiler> idToIndividualOperation =
+      new ConcurrentHashMap<>();
+
+  private Thread flushThread;
+
+  // Whether the periodical logging is enabled or not.
+  private boolean enableProfiiler = false;
+
+  // Count the total number of dropped operations.
+  AtomicLong droppedOperationCount = new AtomicLong(0);
+
+  // Marks an operation for a given request id as started.
+  void startOperation(OperationName operationName, String requestUniqueId) {
+    try {
+      // Skip the whole startOperation if the profiler is not enabled.
+      if (!enableProfiiler) {
+        return;
+      }
+      if (!idToIndividualOperation.containsKey(requestUniqueId)) {
+        if (idToIndividualOperation.size() > MAX_CACHED_REQUEST) {
+          log.warning(
+              String.format(
+                  "startOperation is triggered for request_id: %s that hasn't been seen before;"
+                      + " this is possible when we are recording too many ongoing requests. So"
+                      + " far we have dropped %s operations.",
+                  requestUniqueId, droppedOperationCount));
+          droppedOperationCount.incrementAndGet();
+          return;
+        }
+        idToIndividualOperation.put(
+            requestUniqueId, new IndividualRequestProfiler(requestUniqueId));
+      }
+      idToIndividualOperation.get(requestUniqueId).startOperation(operationName);
+    } catch (Exception ex) {
+      // Mute any exception thrown from the profiler process as we don't want to interrupt normal
+      // operations.
+      log.warning(
+          "Exception thrown by the request profiler was ignored; this suggests a faulty"
+              + " implementation of RequestProfiler, exception context: "
+              + ex.toString());
+    }
+  }
+
+  // Marks an operation for a given request id as ended.
+  void endOperation(OperationName operationName, String requestUniqueId) {
+    try {
+      // Skip the whole endOperation if the profiler is not enabled.
+      if (!enableProfiiler) {
+        return;
+      }
+      if (!idToIndividualOperation.containsKey(requestUniqueId)) {
+        log.warning(
+            String.format(
+                "endOperation is triggered for request_id: %s that hasn't been seen before; this"
+                    + " is possible when we are recording too many ongoing requests. So far we"
+                    + " have dropped %s operations.",
+                requestUniqueId, droppedOperationCount));
+        return;
+      }
+      idToIndividualOperation.get(requestUniqueId).endOperation(operationName);
+    } catch (Exception ex) {
+      // Mute any exception thrown from the profiler process as we don't want to interrupt normal
+      // operations.
+      log.warning(
+          "Exception thrown by the request profiler was ignored; this suggests a faulty"
+              + " implementation of RequestProfiler, exception context: "
+              + ex.toString());
+    }
+  }
+
+  void flushAndPrintReport() {
+    if (!enableProfiiler) {
+      // Only do work when enabled.
+      return;
+    }
+    log.info(flushAndGenerateReportText());
+  }
+
+  // Periodically triggers the report generation.
+  void startPeriodicalReportFlushing() {
+    this.enableProfiiler = true;
+    if (this.flushThread == null || !this.flushThread.isAlive()) {
+      this.flushThread =
+          new Thread(
+              new Runnable() {
+                @Override
+                public void run() {
+                  try {
+                    while (true) {
+                      try {
+                        TimeUnit.MILLISECONDS.sleep(FLUSH_PERIOD.toMillis());
+                      } catch (InterruptedException e) {
+                        log.warning("Flush report thread is interrupted by " + e.toString());
+                        throw new RuntimeException(e);
+                      }
+                      flushAndPrintReport();
+                    }
+                  } catch (Exception ex) {
+                    // Mute any exception thrown from the profiler process as we don't want to
+                    // interrupt normal operations.
+                    log.warning(
+                        "Exception thrown by the request profiler was ignored; this suggests a"
+                            + " faulty implementation of RequestProfiler, exception context: "
+                            + ex.toString());
+                  }
+                }
+              });
+      this.flushThread.start();
+    }
+  }
+
+  String flushAndGenerateReportText() {
+    RequestProfilerComparator comparator = new RequestProfilerComparator();
+
+    // Find the top k requests with the longest latency.
+    PriorityQueue<IndividualRequestProfiler> minHeap =
+        new PriorityQueue<IndividualRequestProfiler>(comparator);
+    Iterator<Entry<String, IndividualRequestProfiler>> iterator =
+        idToIndividualOperation.entrySet().iterator();
+    int finishedRequestCount = 0;
+    // Iterate through all the request stats; add a request to the min heap if it is finished and
+    // has a longer total latency than the smallest latency currently in the heap.
+    while (iterator.hasNext()) {
+      Entry<String, IndividualRequestProfiler> individualRequestProfiler = iterator.next();
+      if (!individualRequestProfiler.getValue().finalized) {
+        continue;
+      }
+      finishedRequestCount++;
+      if (minHeap.size() < TOP_K
+          || individualRequestProfiler.getValue().totalTime > minHeap.peek().totalTime) {
+        minHeap.add(individualRequestProfiler.getValue());
+      }
+      if (minHeap.size() > TOP_K) {
+        minHeap.poll();
+      }
+      // Removing through the iterator while iterating is safe.
+      iterator.remove();
+    }
+
+    // Generate the report for the TOP_K longest requests.
+    String reportText =
+        String.format(
+            "During the last %s milliseconds at system time %s, in total %s requests finished."
+                + " Total dropped request is %s. The top %s long latency requests details"
+                + " report:\n",
+            FLUSH_PERIOD.toMillis(),
+            System.currentTimeMillis(),
+            finishedRequestCount,
+            droppedOperationCount.getAndSet(0),
+            TOP_K);
+    if (minHeap.isEmpty()) {
+      reportText += "-----------------------------\n";
+      reportText += "\t0 requests finished during the last period.";
+    } else {
+      // Print the report for the top k requests.
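+      // The min heap keeps only the TOP_K largest totalTime values: every finished request is
+      // offered, and once the size exceeds TOP_K the smallest element is evicted, so polling
+      // yields ascending order (hence the reverse traversal below).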
+      ArrayList<String> reportList = new ArrayList<>();
+      while (minHeap.size() > 0) {
+        reportList.add("-----------------------------\n" + minHeap.poll().generateReport());
+      }
+      // Output in reverse order to make sure the longest-latency request shows up in front.
+      for (int i = 0; i < reportList.size(); i++) {
+        reportText += reportList.get(reportList.size() - i - 1);
+      }
+    }
+    return reportText;
+  }
+
+  // Min heap comparator.
+  private class RequestProfilerComparator implements Comparator<IndividualRequestProfiler> {
+    @Override
+    public int compare(IndividualRequestProfiler x, IndividualRequestProfiler y) {
+      if (x.totalTime > y.totalTime) {
+        return 1;
+      } else if (x.totalTime < y.totalTime) {
+        return -1;
+      }
+      return 0;
+    }
+  }
+
+  /**
+   * Records the profiling information for each individual request. Acts like a buffer of past
+   * requests, either finished or not finished.
+   */
+  private static final class IndividualRequestProfiler {
+    // From operation name to the list of times spent each time we perform this operation.
+    // e.g. if an operation is retried twice, two times are recorded in the queue.
+    private final Map<OperationName, Queue<Long>> timeRecorderMap;
+
+    // All currently finished operations.
+    private final List<IndividualOperation> finishedOperations;
+
+    private final String requestUniqueId;
+
+    // TOTAL_LATENCY has been marked as finished for this request. In this state `finalized` will
+    // be true and totalTime will have a non-zero value.
+    private long totalTime;
+    private boolean finalized;
+
+    IndividualRequestProfiler(String requestUniqueId) {
+      this.timeRecorderMap = new ConcurrentHashMap<>();
+      this.finishedOperations =
+          Collections.synchronizedList(new ArrayList<IndividualOperation>());
+      this.requestUniqueId = requestUniqueId;
+    }
+
+    void startOperation(OperationName operationName) {
+      timeRecorderMap.putIfAbsent(operationName, new ConcurrentLinkedDeque<>());
+      // Please be aware that System.currentTimeMillis() is not accurate on Windows systems.
+      timeRecorderMap.get(operationName).add(System.currentTimeMillis());
+    }
+
+    void endOperation(OperationName operationName) {
+      if (!timeRecorderMap.containsKey(operationName)) {
+        String warningMessage =
+            String.format(
+                "Operation %s ignored for request %s because startOperation() was not called"
+                    + " before calling endOperation().",
+                operationName, requestUniqueId);
+        log.warning(warningMessage);
+        return;
+      }
+      if (timeRecorderMap.get(operationName).isEmpty()) {
+        String warningMessage =
+            String.format(
+                "Operation %s ignored for request %s because no previous startOperation() was"
+                    + " triggered for this operation.",
+                operationName, requestUniqueId);
+        log.warning(warningMessage);
+        return;
+      }
+
+      long startTime = timeRecorderMap.get(operationName).poll();
+      long endTime = System.currentTimeMillis();
+      long totalTime = endTime - startTime;
+      finishedOperations.add(
+          new IndividualOperation(operationName, startTime, endTime, totalTime));
+      if (operationName == OperationName.TOTAL_LATENCY) {
+        finalized = true;
+        this.totalTime = totalTime;
+      }
+    }
+
+    String generateReport() {
+      String message =
+          "\tRequest uuid: "
+              + requestUniqueId
+              + " with total time "
+              + this.totalTime
+              + " milliseconds\n";
+      for (int i = 0; i < finishedOperations.size(); i++) {
+        if (finishedOperations.get(i).operationName == OperationName.TOTAL_LATENCY) {
+          continue;
+        }
+        message += "\t\t";
+        message += finishedOperations.get(i).format();
+        message += "\n";
+      }
+      return message;
+    }
+
+    // Records the stats of an individual operation.
+ private static final class IndividualOperation { + OperationName operationName; + + // Runtime stats for individual operation. + long totalTime; + long startTimestamp; + long endTimestamp; + + IndividualOperation( + OperationName operationName, long startTimestamp, long endTimestamp, long totalTime) { + this.operationName = operationName; + this.startTimestamp = startTimestamp; + this.endTimestamp = endTimestamp; + this.totalTime = totalTime; + } + + String format() { + return String.format( + "Operation name %s starts at: %s, ends at: " + "%s, total time: %s milliseconds", + operationName.operationName, startTimestamp, endTimestamp, totalTime); + } + } + } + + // Sets how many top latency requests to log during every report period. + public static void setTopKRequestsToLog(int topK) { + TOP_K = topK; + } + + // Sets the report period of the profiler. + public static void setReportPeriod(Duration flushPeriod) { + FLUSH_PERIOD = flushPeriod; + } + + @VisibleForTesting + void enableProfiler() { + this.enableProfiiler = true; + } + + void internalDisableAndClearProfiler() { + this.enableProfiiler = false; + if (this.flushThread != null) { + this.flushThread.interrupt(); + } + this.idToIndividualOperation.clear(); + this.droppedOperationCount.set(0); + + // Set back to default value. + TOP_K = DEFAULT_TOP_K; + FLUSH_PERIOD = DEFAULT_FLUSH_PERIOD; + } + + public static void disableAndResetProfiler() { + REQUEST_PROFILER_SINGLETON.internalDisableAndClearProfiler(); + } + + /** + * A hook for easier access to request profiler. Otherwise we have to trigger tedious if clauses + * to check whether profiler is enabled before every caller's trigger of the request profiler. + * This is because profiler is shared statically across instances. + */ + static class RequestProfilerHook { + private boolean enableRequestProfiler = false; + + RequestProfilerHook(boolean enableRequestProfiler) { + this.enableRequestProfiler = enableRequestProfiler; + } + + // Mimic the api exposed by the main request profiler. + void startOperation(OperationName operationName, String requestUniqueId) { + if (this.enableRequestProfiler) { + RequestProfiler.REQUEST_PROFILER_SINGLETON.startOperation(operationName, requestUniqueId); + } + } + + // Mimic the api exposed by the main request profiler. 
+ void endOperation(OperationName operationName, String requestUniqueId) { + if (this.enableRequestProfiler) { + RequestProfiler.REQUEST_PROFILER_SINGLETON.endOperation(operationName, requestUniqueId); + } + } + + void startPeriodicalReportFlushing() { + if (this.enableRequestProfiler) { + RequestProfiler.REQUEST_PROFILER_SINGLETON.startPeriodicalReportFlushing(); + } + } + + String flushAndGenerateReportText() { + return RequestProfiler.REQUEST_PROFILER_SINGLETON.flushAndGenerateReportText(); + } + + void enableProfiler() { + REQUEST_PROFILER_SINGLETON.enableProfiler(); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java new file mode 100644 index 000000000000..600dddac89d5 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java @@ -0,0 +1,744 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.ExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; +import com.google.common.base.Preconditions; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.DynamicMessage; +import com.google.rpc.Code; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.annotation.Nullable; + +/** + * A StreamWriter that can write data to BigQuery tables. The SchemaAwareStreamWriter is built on + * top of a StreamWriter, and it converts all data to Protobuf messages using provided converter + * then calls StreamWriter's append() method to write to BigQuery tables. It maintains all + * StreamWriter functions, but also provides an additional feature: schema update support, where if + * the BigQuery table schema is updated, users will be able to ingest data on the new schema after + * some time (in order of minutes). + * + *
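+ * <p>A minimal usage sketch (names are illustrative; {@code MyRowConverter} stands for any
+ * user-supplied {@code ToProtoConverter} implementation):
+ *
+ * <pre>{@code
+ * SchemaAwareStreamWriter<MyRow> writer =
+ *     SchemaAwareStreamWriter.newBuilder(tableName, client, new MyRowConverter()).build();
+ * ApiFuture<AppendRowsResponse> future = writer.append(rows);
+ * AppendRowsResponse response = future.get();
+ * writer.close();
+ * }</pre>
+ *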

+ * <p>NOTE: The schema update ability will be disabled when you pass in a table schema explicitly
+ * through the writer. It is recommended that users either use JsonStreamWriter (which fully
+ * manages the table schema) or StreamWriter (which accepts raw proto format, with the user
+ * handling schema update events themselves). If you use this class, you need to be very cautious
+ * about possible mismatches between the writer's schema and the input data; any mismatch between
+ * the two will cause data corruption.
+ */
+public class SchemaAwareStreamWriter<T> implements AutoCloseable {
+  private static final Logger LOG = Logger.getLogger(SchemaAwareStreamWriter.class.getName());
+  private final BigQueryWriteClient client;
+  private final String streamName;
+  private final StreamWriter.Builder streamWriterBuilder;
+  private final boolean ignoreUnknownFields;
+  private final ToProtoConverter<T> toProtoConverter;
+  private StreamWriter streamWriter;
+  private Descriptor descriptor;
+  private TableSchema tableSchema;
+  private ProtoSchema protoSchema;
+  private String CompressorName;
+
+  // In some situations we want to skip the stream writer refresh for an updated schema, e.g. when
+  // the user provides the table schema, we should always use that schema.
+  private final boolean skipRefreshStreamWriter;
+
+  // Provides access to the request profiler.
+  private final RequestProfiler.RequestProfilerHook requestProfilerHook;
+
+  /**
+   * Constructs the SchemaAwareStreamWriter
+   *
+   * @param builder The Builder object for the SchemaAwareStreamWriter
+   */
+  private SchemaAwareStreamWriter(Builder<T> builder)
+      throws DescriptorValidationException, IllegalArgumentException, IOException {
+    this.descriptor =
+        BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(builder.tableSchema);
+
+    if (builder.client == null) {
+      streamWriterBuilder = StreamWriter.newBuilder(builder.streamName);
+    } else {
+      streamWriterBuilder = StreamWriter.newBuilder(builder.streamName, builder.client);
+    }
+    this.protoSchema = ProtoSchemaConverter.convert(this.descriptor);
+    this.client = builder.client;
+    streamWriterBuilder.setWriterSchema(protoSchema);
+    setStreamWriterSettings(
+        builder.channelProvider,
+        builder.credentialsProvider,
+        builder.executorProvider,
+        builder.endpoint,
+        builder.flowControlSettings,
+        builder.traceId,
+        builder.compressorName,
+        builder.retrySettings);
+    streamWriterBuilder.setEnableConnectionPool(builder.enableConnectionPool);
+    streamWriterBuilder.setLocation(builder.location);
+    streamWriterBuilder.setDefaultMissingValueInterpretation(
+        builder.defaultMissingValueInterpretation);
+    streamWriterBuilder.setMissingValueInterpretationMap(builder.missingValueInterpretationMap);
+    streamWriterBuilder.setClientId(builder.clientId);
+    streamWriterBuilder.setEnableLatencyProfiler(builder.enableRequestProfiler);
+    requestProfilerHook = new RequestProfiler.RequestProfilerHook(builder.enableRequestProfiler);
+    if (builder.enableRequestProfiler) {
+      requestProfilerHook.startPeriodicalReportFlushing();
+    }
+    streamWriterBuilder.setEnableOpenTelemetry(builder.enableOpenTelemetry);
+    this.streamWriter = streamWriterBuilder.build();
+    this.streamName = builder.streamName;
+    this.tableSchema = builder.tableSchema;
+    this.toProtoConverter = builder.toProtoConverter;
+    this.ignoreUnknownFields = builder.ignoreUnknownFields;
+    this.skipRefreshStreamWriter = builder.skipRefreshStreamWriter;
+  }
+
+  /**
+   * Writes a collection that contains objects to the BigQuery table by first converting the data
+   * to Protobuf
messages, then using StreamWriter's append() to write the data at the current end of the
+   * stream. If there is a schema update, the current StreamWriter is closed. A new StreamWriter
+   * is created with the updated TableSchema.
+   *
+   * @param items The collection that contains objects to be written
+   * @return {@code ApiFuture<AppendRowsResponse>} returns an AppendRowsResponse message wrapped
+   *     in an ApiFuture
+   */
+  public ApiFuture<AppendRowsResponse> append(Iterable<T> items)
+      throws IOException, DescriptorValidationException {
+    String requestUniqueId = generateRequestUniqueId();
+    requestProfilerHook.startOperation(
+        RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId);
+    try {
+      return appendWithUniqueId(items, -1, requestUniqueId);
+    } catch (Exception ex) {
+      requestProfilerHook.endOperation(
+          RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId);
+      throw ex;
+    }
+  }
+
+  private void refreshWriter(TableSchema updatedSchema)
+      throws DescriptorValidationException, IOException {
+    Preconditions.checkNotNull(updatedSchema, "updatedSchema is null.");
+    LOG.info("Refresh internal writer due to schema update, stream: " + this.streamName);
+    // Close the StreamWriter.
+    this.streamWriter.close();
+    // Update SchemaAwareStreamWriter's TableSchema and Descriptor.
+    this.tableSchema = updatedSchema;
+    this.descriptor =
+        BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(updatedSchema);
+    this.protoSchema = ProtoSchemaConverter.convert(this.descriptor);
+    // Create a new underlying StreamWriter with the updated TableSchema and Descriptor.
+    this.streamWriter = streamWriterBuilder.setWriterSchema(this.protoSchema).build();
+  }
+
+  private List<DynamicMessage> buildMessage(Iterable<T> items)
+      throws InterruptedException, DescriptorValidationException, IOException {
+    try {
+      return this.toProtoConverter.convertToProtoMessage(
+          this.descriptor, this.tableSchema, items, ignoreUnknownFields);
+    } catch (RowIndexToErrorException ex) {
+      // We only retry for the data-unknown error.
+      if (!ex.hasDataUnknownError) {
+        throw ex;
+      }
+      // Directly return the error when stream writer refresh is disabled.
+      if (this.skipRefreshStreamWriter) {
+        throw ex;
+      }
+      LOG.warning(
+          "Saw unknown field error during proto message conversion, with error messages "
+              + ex.rowIndexToErrorMessage
+              + "; trying to refresh the writer with an updated schema, stream: "
+              + streamName);
+      GetWriteStreamRequest writeStreamRequest =
+          GetWriteStreamRequest.newBuilder()
+              .setName(this.streamName)
+              .setView(WriteStreamView.FULL)
+              .build();
+      WriteStream writeStream = client.getWriteStream(writeStreamRequest);
+      refreshWriter(writeStream.getTableSchema());
+      return this.toProtoConverter.convertToProtoMessage(
+          this.descriptor, this.tableSchema, items, ignoreUnknownFields);
+    }
+  }
+
+  /**
+   * Writes a collection that contains objects to the BigQuery table by first converting the data
+   * to Protobuf messages, then using StreamWriter's append() to write the data at the specified
+   * offset. If there is a schema update, the current StreamWriter is closed. A new StreamWriter
+   * is created with the updated TableSchema.
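+   *
+   * <p>For example, writing the first batch at offset 0 and the next batch at the count of rows
+   * already appended lets the backend drop duplicated batches on retry (an illustration of
+   * offset-based deduplication; the exact offset semantics are defined by the Storage Write API).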
+ * + * @param items The collection that contains objects to be written + * @param offset Offset for deduplication + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture + */ + public ApiFuture append(Iterable items, long offset) + throws IOException, DescriptorValidationException { + String requestUniqueId = generateRequestUniqueId(); + requestProfilerHook.startOperation( + RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId); + try { + return appendWithUniqueId(items, offset, requestUniqueId); + } catch (Exception ex) { + requestProfilerHook.endOperation( + RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId); + throw ex; + } + } + + ApiFuture appendWithUniqueId( + Iterable items, long offset, String requestUniqueId) + throws DescriptorValidationException, IOException { + // Handle schema updates in a Thread-safe way by locking down the operation + synchronized (this) { + requestProfilerHook.startOperation( + RequestProfiler.OperationName.JSON_TO_PROTO_CONVERSION, requestUniqueId); + ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); + try { + // Create a new stream writer internally if a new updated schema is reported from backend. + if (!this.skipRefreshStreamWriter && this.streamWriter.getUpdatedSchema() != null) { + refreshWriter(this.streamWriter.getUpdatedSchema()); + } + // Any error in convertToProtoMessage will throw an + // IllegalArgumentException/IllegalStateException/NullPointerException. + // IllegalArgumentException will be collected into a Map of row indexes to error messages. + // After the conversion is finished an AppendSerializtionError exception that contains all + // the + // conversion errors will be thrown. + Map rowIndexToErrorMessage = new HashMap<>(); + try { + List protoMessages = buildMessage(items); + for (DynamicMessage dynamicMessage : protoMessages) { + rowsBuilder.addSerializedRows(dynamicMessage.toByteString()); + } + } catch (RowIndexToErrorException exception) { + rowIndexToErrorMessage = exception.rowIndexToErrorMessage; + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + if (!rowIndexToErrorMessage.isEmpty()) { + throw new AppendSerializationError( + Code.INVALID_ARGUMENT.getNumber(), + "Append serialization failed for writer: " + streamName, + streamName, + rowIndexToErrorMessage); + } + } finally { + requestProfilerHook.endOperation( + RequestProfiler.OperationName.JSON_TO_PROTO_CONVERSION, requestUniqueId); + } + return this.streamWriter.appendWithUniqueId(rowsBuilder.build(), offset, requestUniqueId); + } + } + + /** + * @return The name of the write stream associated with this writer. + */ + public String getStreamName() { + return this.streamName; + } + + /** + * @return A unique Id for this writer. + */ + public String getWriterId() { + return streamWriter.getWriterId(); + } + + /** + * Gets current descriptor + * + * @return Descriptor + */ + public Descriptor getDescriptor() { + return this.descriptor; + } + + /** + * Gets the location of the destination + * + * @return Descriptor + */ + public String getLocation() { + return this.streamWriter.getLocation(); + } + + /** + * Returns the wait of a request in Client side before sending to the Server. Request could wait + * in Client because it reached the client side inflight request limit (adjustable when + * constructing the Writer). The value is the wait time for the last sent request. 
A constant high + * wait value indicates a need for more throughput, you can create a new Stream for to increase + * the throughput in exclusive stream case, or create a new Writer in the default stream case. + */ + public long getInflightWaitSeconds() { + return streamWriter.getInflightWaitSeconds(); + } + + /** + * @return the missing value interpretation map used for the writer. + */ + public Map + getMissingValueInterpretationMap() { + return streamWriter.getMissingValueInterpretationMap(); + } + + /** Sets all StreamWriter settings. */ + private void setStreamWriterSettings( + @Nullable TransportChannelProvider channelProvider, + @Nullable CredentialsProvider credentialsProvider, + @Nullable ExecutorProvider executorProvider, + @Nullable String endpoint, + @Nullable FlowControlSettings flowControlSettings, + @Nullable String traceId, + @Nullable String compressorName, + @Nullable RetrySettings retrySettings) { + if (channelProvider != null) { + streamWriterBuilder.setChannelProvider(channelProvider); + } + if (credentialsProvider != null) { + streamWriterBuilder.setCredentialsProvider(credentialsProvider); + } + if (executorProvider != null) { + streamWriterBuilder.setExecutorProvider(executorProvider); + } + if (endpoint != null) { + streamWriterBuilder.setEndpoint(endpoint); + } + if (traceId != null) { + streamWriterBuilder.setTraceId(traceId); + } + if (flowControlSettings != null) { + if (flowControlSettings.getMaxOutstandingRequestBytes() != null) { + streamWriterBuilder.setMaxInflightBytes( + flowControlSettings.getMaxOutstandingRequestBytes()); + } + if (flowControlSettings.getMaxOutstandingElementCount() != null) { + streamWriterBuilder.setMaxInflightRequests( + flowControlSettings.getMaxOutstandingElementCount()); + } + if (flowControlSettings.getLimitExceededBehavior() != null) { + streamWriterBuilder.setLimitExceededBehavior( + flowControlSettings.getLimitExceededBehavior()); + } + } + if (compressorName != null) { + streamWriterBuilder.setCompressorName(compressorName); + } + if (retrySettings != null) { + streamWriterBuilder.setRetrySettings(retrySettings); + } + } + + /** + * newBuilder that constructs a SchemaAwareStreamWriter builder with BigQuery client being + * initialized by StreamWriter by default. + * + *

+   * <p>The table schema passed in will be updated automatically when there is a schema update
+   * event. When used for Writer creation, it should be the latest schema. So if you are trying
+   * to reuse a stream, you should use newBuilder(String streamOrTableName, BigQueryWriteClient
+   * client) instead, so that the created Writer is based on a fresh schema.
+   *
+   * @param streamOrTableName name of the stream that must follow
+   *     "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" or table name
+   *     "projects/[^/]+/datasets/[^/]+/tables/[^/]+"
+   * @param tableSchema The schema of the table when the stream was created, which is passed back
+   *     through {@code WriteStream}
+   * @return Builder
+   */
+  public static <T> Builder<T> newBuilder(
+      String streamOrTableName, TableSchema tableSchema, ToProtoConverter<T> toProtoConverter) {
+    Preconditions.checkNotNull(streamOrTableName, "StreamOrTableName is null.");
+    Preconditions.checkNotNull(tableSchema, "TableSchema is null.");
+    Preconditions.checkNotNull(toProtoConverter, "ToProtoConverter is null.");
+    return new Builder<>(streamOrTableName, tableSchema, null, toProtoConverter);
+  }
+
+  /**
+   * newBuilder that constructs a SchemaAwareStreamWriter builder.
+   *
+   *

The table schema passed in will be updated automatically when there is a schema update + * event. When used for Writer creation, it should be the latest schema. So when you are trying to + * reuse a stream, you should use Builder newBuilder( String streamOrTableName, + * BigQueryWriteClient client) instead, so the created Writer will be based on a fresh schema. + * + * @param streamOrTableName name of the stream that must follow + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" + * @param tableSchema The schema of the table when the stream was created, which is passed back + * through {@code WriteStream} + * @param client + * @return Builder + */ + public static Builder newBuilder( + String streamOrTableName, + TableSchema tableSchema, + BigQueryWriteClient client, + ToProtoConverter toProtoConverter) { + Preconditions.checkNotNull(streamOrTableName, "StreamOrTableName is null."); + Preconditions.checkNotNull(tableSchema, "TableSchema is null."); + Preconditions.checkNotNull(client, "BigQuery client is null."); + Preconditions.checkNotNull(toProtoConverter, "ToProtoConverter is null."); + return new Builder<>(streamOrTableName, tableSchema, client, toProtoConverter); + } + + /** + * newBuilder that constructs a SchemaAwareStreamWriter builder with TableSchema being initialized + * by StreamWriter by default. + * + * @param streamOrTableName name of the stream that must follow + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" + * @param client BigQueryWriteClient + * @return Builder + */ + public static Builder newBuilder( + String streamOrTableName, BigQueryWriteClient client, ToProtoConverter toProtoConverter) { + Preconditions.checkNotNull(streamOrTableName, "StreamOrTableName is null."); + Preconditions.checkNotNull(client, "BigQuery client is null."); + Preconditions.checkNotNull(toProtoConverter, "ToProtoConverter is null."); + return new Builder<>(streamOrTableName, null, client, toProtoConverter); + } + + /** Closes the underlying StreamWriter. */ + @Override + public void close() { + this.streamWriter.close(); + } + + /** + * @return if a writer can no longer be used for writing. It is due to either the + * SchemaAwareStreamWriter is explicitly closed or the underlying connection is broken when + * connection pool is not used. Client should recreate SchemaAwareStreamWriter in this case. + */ + public boolean isClosed() { + return this.streamWriter.isClosed(); + } + + /** + * @return if user explicitly closed the writer. + */ + public boolean isUserClosed() { + return this.streamWriter.isUserClosed(); + } + + public static final class Builder { + private final String streamName; + private final BigQueryWriteClient client; + private final TableSchema tableSchema; + + private final boolean skipRefreshStreamWriter; + + private final ToProtoConverter toProtoConverter; + private TransportChannelProvider channelProvider; + private CredentialsProvider credentialsProvider; + private ExecutorProvider executorProvider; + private FlowControlSettings flowControlSettings; + private String endpoint; + private String traceIdBase; + private String traceId; + private boolean ignoreUnknownFields = false; + // Indicates whether multiplexing mode is enabled. 
+ private boolean enableConnectionPool = false; + private String location; + private String compressorName; + private RetrySettings retrySettings; + + private AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation = + MissingValueInterpretation.MISSING_VALUE_INTERPRETATION_UNSPECIFIED; + private Map + missingValueInterpretationMap = new HashMap(); + private String clientId; + + private boolean enableRequestProfiler = false; + private boolean enableOpenTelemetry = false; + + private static final String streamPatternString = + "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)/streams/[^/]+"; + private static final String tablePatternString = "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)"; + private static final String defaultStreamPatternString = tablePatternString + "/_default"; + + private static final Pattern streamPattern = Pattern.compile(streamPatternString); + private static final Pattern tablePattern = Pattern.compile(tablePatternString); + private static final Pattern defaultStreamPattern = Pattern.compile(defaultStreamPatternString); + + /** + * Constructor for SchemaAwareStreamWriter's Builder + * + * @param streamOrTableName name of the stream that must follow + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" or + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+" + * @param tableSchema schema used to convert items to proto messages. + * @param client + * @param toProtoConverter converter used to convert items to proto messages + */ + private Builder( + String streamOrTableName, + TableSchema tableSchema, + BigQueryWriteClient client, + ToProtoConverter toProtoConverter) { + Matcher streamMatcher = streamPattern.matcher(streamOrTableName); + if (!streamMatcher.matches()) { + Matcher tableMatcher = tablePattern.matcher(streamOrTableName); + Matcher defaultStreamMatcher = defaultStreamPattern.matcher(streamOrTableName); + + if (!tableMatcher.matches() && !defaultStreamMatcher.matches()) { + throw new IllegalArgumentException("Invalid name: " + streamOrTableName); + } else if (!tableMatcher.matches() && defaultStreamMatcher.matches()) { + this.streamName = streamOrTableName; + } else { + this.streamName = streamOrTableName + "/_default"; + } + } else { + this.streamName = streamOrTableName; + } + this.client = client; + if (tableSchema == null) { + GetWriteStreamRequest writeStreamRequest = + GetWriteStreamRequest.newBuilder() + .setName(this.getStreamName()) + .setView(WriteStreamView.FULL) + .build(); + + WriteStream writeStream = this.client.getWriteStream(writeStreamRequest); + this.tableSchema = writeStream.getTableSchema(); + this.location = writeStream.getLocation(); + this.skipRefreshStreamWriter = false; + } else { + this.tableSchema = tableSchema; + this.skipRefreshStreamWriter = true; + } + this.toProtoConverter = toProtoConverter; + } + + /** + * Setter for the underlying StreamWriter's TransportChannelProvider. + * + * @param channelProvider + * @return Builder + */ + public Builder setChannelProvider(TransportChannelProvider channelProvider) { + this.channelProvider = + Preconditions.checkNotNull(channelProvider, "ChannelProvider is null."); + return this; + } + + /** + * Setter for the underlying StreamWriter's CredentialsProvider. 
+ * + * @param credentialsProvider + * @return Builder + */ + public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) { + this.credentialsProvider = + Preconditions.checkNotNull(credentialsProvider, "CredentialsProvider is null."); + return this; + } + + /** + * Setter for the underlying StreamWriter's ExecutorProvider. + * + * @param executorProvider + * @return + */ + public Builder setExecutorProvider(ExecutorProvider executorProvider) { + this.executorProvider = + Preconditions.checkNotNull(executorProvider, "ExecutorProvider is null."); + return this; + } + + /** + * Setter for the underlying StreamWriter's FlowControlSettings. + * + * @param flowControlSettings + * @return Builder + */ + public Builder setFlowControlSettings(FlowControlSettings flowControlSettings) { + this.flowControlSettings = + Preconditions.checkNotNull(flowControlSettings, "FlowControlSettings is null."); + return this; + } + + /** + * Stream name on the builder. + * + * @return Builder + */ + public String getStreamName() { + return streamName; + } + + /** + * Setter for the underlying StreamWriter's Endpoint. + * + * @param endpoint + * @return Builder + */ + public Builder setEndpoint(String endpoint) { + this.endpoint = Preconditions.checkNotNull(endpoint, "Endpoint is null."); + return this; + } + + /** + * Setter for a traceId to help identify traffic origin. + * + * @param traceId + * @return Builder + */ + public Builder setTraceId(String traceId) { + this.traceId = Preconditions.checkNotNull(traceId, "TraceId is null."); + return this; + } + + Builder setClientId(String clientId) { + this.clientId = Preconditions.checkNotNull(clientId, "ClientId is null."); + return this; + } + + /** + * Setter for a ignoreUnknownFields, if true, unknown fields to BigQuery will be ignored instead + * of error out. 1 + * + * @param ignoreUnknownFields + * @return Builder + */ + public Builder setIgnoreUnknownFields(boolean ignoreUnknownFields) { + this.ignoreUnknownFields = ignoreUnknownFields; + return this; + } + + /** + * Enable multiplexing for this writer. In multiplexing mode tables will share the same + * connection if possible until the connection is overwhelmed. This feature is still under + * development, please contact write api team before using. + * + * @param enableConnectionPool + * @return Builder + */ + public Builder setEnableConnectionPool(boolean enableConnectionPool) { + this.enableConnectionPool = enableConnectionPool; + return this; + } + + /** + * Location of the table this stream writer is targeting. Connection pools are shared by + * location. + * + * @param location + * @return Builder + */ + public Builder setLocation(String location) { + if (this.location != null && !this.location.equals(location)) { + throw new IllegalArgumentException( + "Specified location " + location + " does not match the system value " + this.location); + } + this.location = location; + return this; + } + + /** + * Sets the compression to use for the calls. The compressor must be of type gzip. + * + * @param compressorName + * @return Builder + */ + public Builder setCompressorName(String compressorName) { + this.compressorName = compressorName; + return this; + } + + /** + * Sets the default missing value interpretation value if the column is not presented in the + * missing_value_interpretations map. 
+ */ + public Builder setDefaultMissingValueInterpretation( + AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation) { + this.defaultMissingValueInterpretation = defaultMissingValueInterpretation; + return this; + } + + /** + * Sets the missing value interpretation map for the SchemaAwareStreamWriter. The input + * missingValueInterpretationMap is used for all append requests unless otherwise changed. + * + * @param missingValueInterpretationMap the missing value interpretation map used by the + * SchemaAwareStreamWriter. + * @return Builder + */ + public Builder setMissingValueInterpretationMap( + Map missingValueInterpretationMap) { + this.missingValueInterpretationMap = missingValueInterpretationMap; + return this; + } + + /** + * Sets the RetrySettings to use for in-stream error retry. + * + * @param retrySettings + * @return Builder + */ + public Builder setRetrySettings(RetrySettings retrySettings) { + this.retrySettings = retrySettings; + return this; + } + + /** + * Enable a latency profiler that would periodically generate a detailed latency report for the + * top latency requests. This is currently an experimental API. + */ + public Builder setEnableLatencyProfiler(boolean enableLatencyProfiler) { + this.enableRequestProfiler = enableLatencyProfiler; + return this; + } + + /** Enable generation of metrics for OpenTelemetry. */ + public Builder setEnableOpenTelemetry(boolean enableOpenTelemetry) { + this.enableOpenTelemetry = enableOpenTelemetry; + return this; + } + + /** + * Builds SchemaAwareStreamWriter + * + * @return SchemaAwareStreamWriter + */ + public SchemaAwareStreamWriter build() + throws DescriptorValidationException, + IllegalArgumentException, + IOException, + InterruptedException { + return new SchemaAwareStreamWriter<>(this); + } + } + + private String generateRequestUniqueId() { + return getStreamName() + "-" + UUID.randomUUID().toString(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Singletons.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Singletons.java new file mode 100644 index 000000000000..c0f391ca0392 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Singletons.java @@ -0,0 +1,37 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import java.util.logging.Logger; + +/** Container for global singleton objects. 
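+ * <p>Note: {@code getOpenTelemetry()} lazily reads {@code GlobalOpenTelemetry.get()} on first
+ * use and caches it. The getter is not synchronized; concurrent first calls may race, which is
+ * benign here because they resolve the same global instance.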
*/ +public class Singletons { + + private static final Logger log = Logger.getLogger(Singletons.class.getName()); + + // Global OpenTelemetry instance + private static OpenTelemetry openTelemetry = null; + + public static OpenTelemetry getOpenTelemetry() { + if (openTelemetry == null) { + openTelemetry = GlobalOpenTelemetry.get(); + log.info("BigQueryStorage initialized Open Telemetry"); + } + return openTelemetry; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamConnection.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamConnection.java new file mode 100644 index 000000000000..0ab29e7673a5 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamConnection.java @@ -0,0 +1,123 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import io.grpc.CallOptions; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * StreamConnection is responsible for writing requests to a GRPC bidirecional connection. + * + *

StreamWriter creates a connection. Two callback functions are necessary: request_callback and + * done_callback. Request callback is used for every request, and done callback is used to notify + * the user that the connection is closed and no more callbacks will be received from this + * connection. + * + *

+ * <p>The stream writer accepts all the requests without flow control and invokes the callbacks
+ * in receiving order.
+ *
+ *
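+ * <p>(In this library the caller is the writer, which applies its maxInflightRequests and
+ * maxInflightBytes limits before requests reach this class.)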

It's user's responsibility to do the flow control and maintain the lifetime of the requests. + */ +class StreamConnection { + private BidiStreamingCallable bidiStreamingCallable; + private ClientStream clientStream; + + private RequestCallback requestCallback; + private DoneCallback doneCallback; + + private static final Logger log = Logger.getLogger(StreamConnection.class.getName()); + + public StreamConnection( + BigQueryWriteClient client, + RequestCallback requestCallback, + DoneCallback doneCallback, + @Nullable String compressorName) { + this.requestCallback = requestCallback; + this.doneCallback = doneCallback; + + ApiCallContext apiCallContext = null; + if (compressorName != null) { + apiCallContext = + GrpcCallContext.createDefault() + .withCallOptions(CallOptions.DEFAULT.withCompression(compressorName)); + log.info("gRPC compression is enabled with " + compressorName + " compression"); + } + + bidiStreamingCallable = client.appendRowsCallable(); + clientStream = + bidiStreamingCallable.splitCall( + new ResponseObserver() { + + @Override + public void onStart(StreamController controller) { + // no-op + } + + @Override + public void onResponse(AppendRowsResponse response) { + StreamConnection.this.requestCallback.run(response); + } + + @Override + public void onError(Throwable t) { + StreamConnection.this.doneCallback.run(t); + } + + @Override + public void onComplete() { + StreamConnection.this.doneCallback.run( + new StatusRuntimeException( + Status.fromCode(Code.CANCELLED) + .withDescription("Stream is closed by user."))); + } + }, + apiCallContext); + } + + /** + * Sends a request to the bi-directional stream connection. + * + * @param request request to send. + */ + public void send(AppendRowsRequest request) { + clientStream.send(request); + } + + /** Close the bi-directional stream connection. */ + public void close() { + clientStream.closeSend(); + } + + /** Invoked when a response is received from the server. */ + public static interface RequestCallback { + public void run(AppendRowsResponse response); + } + + /** Invoked when server closes the connection. */ + public static interface DoneCallback { + public void run(Throwable finalStatus); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java new file mode 100644 index 000000000000..6a386c7c849d --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java @@ -0,0 +1,1139 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.batching.FlowController; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.ExecutorProvider; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.auth.Credentials; +import com.google.auto.value.AutoOneOf; +import com.google.auto.value.AutoValue; +import com.google.cloud.bigquery.storage.v1.AppendFormats.AppendRowsData; +import com.google.cloud.bigquery.storage.v1.AppendFormats.AppendRowsSchema; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation; +import com.google.cloud.bigquery.storage.v1.ConnectionWorker.AppendRequestAndResponse; +import com.google.cloud.bigquery.storage.v1.ConnectionWorker.TableSchemaAndTimestamp; +import com.google.cloud.bigquery.storage.v1.StreamWriter.SingleConnectionOrConnectionPool.Kind; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import com.google.protobuf.ByteString; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.opentelemetry.api.common.Attributes; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.annotation.Nullable; +import org.apache.arrow.vector.ipc.WriteChannel; +import org.apache.arrow.vector.ipc.message.MessageSerializer; +import org.apache.arrow.vector.types.pojo.Schema; + +/** + * A BigQuery Stream Writer that can be used to write data into BigQuery Table. + * + *

TODO: Support batching. + */ +public class StreamWriter implements AutoCloseable { + private static final Logger log = Logger.getLogger(StreamWriter.class.getName()); + + private static String datasetsMatching = "projects/[^/]+/datasets/[^/]+/"; + private static Pattern streamPatternDatasets = Pattern.compile(datasetsMatching); + + private static String defaultStreamMatching = "/_default"; + private static Pattern streamPatternDefaultStream = Pattern.compile(defaultStreamMatching); + + // Cache of location info for a given dataset. + private static long LOCATION_CACHE_EXPIRE_MILLIS = 10 * 60 * 1000; // 10 minutes + + private static Cache allocateProjectLocationCache() { + return CacheBuilder.newBuilder() + .expireAfterWrite(LOCATION_CACHE_EXPIRE_MILLIS, TimeUnit.MILLISECONDS) + .build(); + } + + private static Cache projectAndDatasetToLocation = allocateProjectLocationCache(); + /* + * The identifier of stream to write to. + */ + private final String streamName; + + /** This is the library version may or may not include library version id. */ + private final String fullTraceId; + + /** Every writer has a fixed proto schema or arrow schema. */ + private final AppendRowsSchema writerSchema; + + /* + * Location of the destination. + */ + private final String location; + + /* + * If user has closed the StreamWriter. + */ + private AtomicBoolean userClosed = new AtomicBoolean(false); + + /* + * A String that uniquely identifies this writer. + */ + private final String writerId = UUID.randomUUID().toString(); + + /** + * The default missing value interpretation if the column has default value defined but not + * presented in the missing value map. + */ + private AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation = + MissingValueInterpretation.MISSING_VALUE_INTERPRETATION_UNSPECIFIED; + + // Map of fields to their MissingValueInterpretation, which dictates how a field should be + // populated when it is missing from an input user row. + private Map missingValueInterpretationMap = + new HashMap(); + + /** + * Stream can access a single connection or a pool of connection depending on whether multiplexing + * is enabled. + */ + private final SingleConnectionOrConnectionPool singleConnectionOrConnectionPool; + + /** Test only param to tell how many times a client is created. */ + private static int testOnlyClientCreatedTimes = 0; + + /** + * Static map from {@link ConnectionPoolKey} to connection pool. Note this map is static to be + * shared by every stream writer in the same process. + */ + private static final Map connectionPoolMap = + new ConcurrentHashMap<>(); + + /** Creation timestamp of this streamwriter */ + private final long creationTimestamp; + + /** Provide access to the request profiler tool. */ + private final RequestProfiler.RequestProfilerHook requestProfilerHook; + + private Lock lock; + + /** The maximum size of one request. Defined by the API. */ + public static long getApiMaxRequestBytes() { + return ConnectionWorker.getApiMaxRequestBytes(); + } + + /** + * Connection pool with different key will be split. + * + *
<p>
Shard based only on location right now. + */ + @AutoValue + abstract static class ConnectionPoolKey { + abstract String location(); + + abstract int credentialsHashcode(); + + public static ConnectionPoolKey create(String location, @Nullable Credentials credentials) { + return new AutoValue_StreamWriter_ConnectionPoolKey( + location, credentials != null ? credentials.hashCode() : 0); + } + } + + /** + * When in single table mode, append directly to connectionWorker. Otherwise append to connection + * pool in multiplexing mode. + */ + @AutoOneOf(SingleConnectionOrConnectionPool.Kind.class) + abstract static class SingleConnectionOrConnectionPool { + /** Kind of connection operation mode. */ + public enum Kind { + CONNECTION_WORKER, + CONNECTION_WORKER_POOL + } + + abstract Kind getKind(); + + abstract ConnectionWorker connectionWorker(); + + abstract ConnectionWorkerPool connectionWorkerPool(); + + ApiFuture append( + StreamWriter streamWriter, AppendRowsData rows, long offset, String requestUniqueId) { + if (getKind() == Kind.CONNECTION_WORKER) { + return connectionWorker().append(streamWriter, rows, offset, requestUniqueId); + } else { + return connectionWorkerPool().append(streamWriter, rows, offset, requestUniqueId); + } + } + + @VisibleForTesting + Attributes getTelemetryAttributes(StreamWriter streamWriter) { + if (getKind() == Kind.CONNECTION_WORKER) { + return connectionWorker().getTelemetryAttributes(); + } else { + return connectionWorkerPool().getTelemetryAttributes(streamWriter); + } + } + + void close(StreamWriter streamWriter) { + if (getKind() == Kind.CONNECTION_WORKER) { + connectionWorker().close(); + } else { + connectionWorkerPool().close(streamWriter); + } + } + + long getInflightWaitSeconds(StreamWriter streamWriter) { + if (getKind() == Kind.CONNECTION_WORKER_POOL) { + return connectionWorkerPool().getInflightWaitSeconds(streamWriter); + } + return connectionWorker().getInflightWaitSeconds(); + } + + TableSchemaAndTimestamp getUpdatedSchema(StreamWriter streamWriter) { + if (getKind() == Kind.CONNECTION_WORKER_POOL) { + return connectionWorkerPool().getUpdatedSchema(streamWriter); + } + // Always populate MIN timestamp to w + return connectionWorker().getUpdatedSchema(); + } + + String getWriterId(String streamWriterId) { + if (getKind() == Kind.CONNECTION_WORKER_POOL) { + return streamWriterId; + } + return connectionWorker().getWriterId(); + } + + static SingleConnectionOrConnectionPool ofSingleConnection(ConnectionWorker connection) { + return AutoOneOf_StreamWriter_SingleConnectionOrConnectionPool.connectionWorker(connection); + } + + static SingleConnectionOrConnectionPool ofConnectionPool(ConnectionWorkerPool connectionPool) { + return AutoOneOf_StreamWriter_SingleConnectionOrConnectionPool.connectionWorkerPool( + connectionPool); + } + } + + private StreamWriter(Builder builder) throws IOException { + this.streamName = builder.streamName; + this.writerSchema = builder.writerSchema; + this.defaultMissingValueInterpretation = builder.defaultMissingValueInterpretation; + this.missingValueInterpretationMap = builder.missingValueInterpretationMap; + BigQueryWriteSettings clientSettings = getBigQueryWriteSettings(builder); + this.requestProfilerHook = + new RequestProfiler.RequestProfilerHook(builder.enableRequestProfiler); + this.fullTraceId = builder.getFullTraceId(); + if (builder.enableRequestProfiler) { + // Request profiler is enabled on singleton level, from now on a periodical flush will be + // started + // to generate detailed latency reports for requests 
latency. + requestProfilerHook.startPeriodicalReportFlushing(); + } + if (!builder.enableConnectionPool) { + this.location = builder.location; + this.singleConnectionOrConnectionPool = + SingleConnectionOrConnectionPool.ofSingleConnection( + new ConnectionWorker( + builder.streamName, + builder.location, + builder.writerSchema, + builder.maxInflightRequest, + builder.maxInflightBytes, + builder.maxRetryDuration, + builder.limitExceededBehavior, + builder.getFullTraceId(), + builder.compressorName, + clientSettings, + builder.retrySettings, + builder.enableRequestProfiler, + builder.enableOpenTelemetry, + /* isMultiplexing= */ false)); + } else { + if (!isDefaultStream(streamName)) { + log.warning( + "Connection pool is only allowed in default stream! However received " + + builder.streamName); + throw new IllegalArgumentException( + "Trying to enable connection pool in non-default stream."); + } + + // We need a client to perform some getWriteStream calls. + BigQueryWriteClient client = + builder.client != null ? builder.client : new BigQueryWriteClient(clientSettings); + String location = builder.location; + if (location == null || location.isEmpty()) { + // Location is not passed in, try to fetch from RPC + String datasetAndProjectName = extractDatasetAndProjectName(builder.streamName); + try { + location = + projectAndDatasetToLocation.get( + datasetAndProjectName, + new Callable() { + @Override + public String call() throws Exception { + GetWriteStreamRequest writeStreamRequest = + GetWriteStreamRequest.newBuilder() + .setName(getStreamName()) + .setView(WriteStreamView.BASIC) + .build(); + + WriteStream writeStream = client.getWriteStream(writeStreamRequest); + TableSchema writeStreamTableSchema = writeStream.getTableSchema(); + String fetchedLocation = writeStream.getLocation(); + log.info( + String.format( + "Fetched location %s for stream name %s, extracted project and" + + " dataset name: %s\"", + fetchedLocation, streamName, datasetAndProjectName)); + return fetchedLocation; + } + }); + } catch (ExecutionException e) { + throw new IllegalStateException(e.getCause()); + } + if (location.isEmpty()) { + throw new IllegalStateException( + String.format( + "The location is empty for both user passed in value and looked up value for " + + "stream: %s, extracted project and dataset name: %s", + streamName, datasetAndProjectName)); + } + } + this.location = location; + CredentialsProvider credentialsProvider = client.getSettings().getCredentialsProvider(); + // Assume the connection in the same pool share the same client and trace id. + // The first StreamWriter for a new stub will create the pool for the other + // streams in the same region, meaning the per StreamWriter settings are no + // longer working unless all streams share the same set of settings + this.singleConnectionOrConnectionPool = + SingleConnectionOrConnectionPool.ofConnectionPool( + connectionPoolMap.computeIfAbsent( + ConnectionPoolKey.create( + location, + credentialsProvider != null ? credentialsProvider.getCredentials() : null), + (key) -> { + return new ConnectionWorkerPool( + builder.maxInflightRequest, + builder.maxInflightBytes, + builder.maxRetryDuration, + builder.limitExceededBehavior, + builder.compressorName, + client.getSettings(), + builder.retrySettings, + builder.enableRequestProfiler, + builder.enableOpenTelemetry); + })); + validateFetchedConnectonPool(builder); + // If the client is not from outside, then shutdown the client we created. 
+ if (builder.client == null) { + client.shutdown(); + try { + client.awaitTermination(150, TimeUnit.SECONDS); + } catch (InterruptedException unused) { + // Ignore interruption as this client is not used. + } + client.close(); + } + } + this.creationTimestamp = System.nanoTime(); + } + + @VisibleForTesting + static String extractDatasetAndProjectName(String streamName) { + Matcher streamMatcher = streamPatternDatasets.matcher(streamName); + if (streamMatcher.find()) { + return streamMatcher.group(); + } else { + throw new IllegalStateException( + String.format("The passed in stream name does not match standard format %s", streamName)); + } + } + + @VisibleForTesting + static boolean isDefaultStream(String streamName) { + Matcher streamMatcher = streamPatternDefaultStream.matcher(streamName); + return streamMatcher.find(); + } + + @VisibleForTesting + static void recreateProjectLocationCache(long durationExpireMillis) { + LOCATION_CACHE_EXPIRE_MILLIS = durationExpireMillis; + projectAndDatasetToLocation = allocateProjectLocationCache(); + } + + String getFullTraceId() { + return fullTraceId; + } + + AppendRowsRequest.MissingValueInterpretation getDefaultValueInterpretation() { + return defaultMissingValueInterpretation; + } + + static BigQueryWriteSettings getBigQueryWriteSettings(Builder builder) throws IOException { + BigQueryWriteSettings.Builder settingsBuilder = null; + if (builder.client != null) { + settingsBuilder = builder.client.getSettings().toBuilder(); + } else { + settingsBuilder = + new BigQueryWriteSettings.Builder() + .setTransportChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTimeDuration(java.time.Duration.ofMinutes(1)) + .setKeepAliveTimeoutDuration(java.time.Duration.ofMinutes(1)) + .setKeepAliveWithoutCalls(true) + .setChannelsPerCpu(2) + .build()) + .setCredentialsProvider( + BigQueryWriteSettings.defaultCredentialsProviderBuilder().build()) + .setBackgroundExecutorProvider( + BigQueryWriteSettings.defaultExecutorProviderBuilder().build()) + .setEndpoint(BigQueryWriteSettings.getDefaultEndpoint()); + } + if (builder.channelProvider != null) { + settingsBuilder.setTransportChannelProvider(builder.channelProvider); + } + if (builder.credentialsProvider != null) { + settingsBuilder.setCredentialsProvider(builder.credentialsProvider); + } + if (builder.executorProvider != null) { + settingsBuilder.setBackgroundExecutorProvider(builder.executorProvider); + } + if (builder.endpoint != null) { + settingsBuilder.setEndpoint(builder.endpoint); + } + + return settingsBuilder.build(); + } + + // Validate whether the fetched connection pool matched certain properties. + private void validateFetchedConnectonPool(StreamWriter.Builder builder) { + FlowController.LimitExceededBehavior storedLimitExceededBehavior = + singleConnectionOrConnectionPool.connectionWorkerPool().limitExceededBehavior(); + if (!Objects.equals(storedLimitExceededBehavior, builder.limitExceededBehavior)) { + throw new IllegalArgumentException( + String.format( + "Limit exceeded behavior setting used for the same connection pool for the same " + + "location must be the same, however stored value is %s, and expected " + + "value is %s.", + storedLimitExceededBehavior, builder.limitExceededBehavior)); + } + } + + /** + * Schedules the writing of Arrow record batch at the end of current stream. Since the + * StreamWriter doesn't know how many rows are in the batch, the OpenTelemetry row count metric + * will report 0 rows for the append. 
Please use the append method overload that accepts
+   * org.apache.arrow.vector.ipc.message.ArrowRecordBatch if the OpenTelemetry row count is
+   * required. Arrow schema is required to be set for the StreamWriter to use this method.
+   *
+   * @param recordBatch the Arrow record batch in serialized format to write to BigQuery.
+   *
<p>
Since the serialized Arrow record batch doesn't contain the schema, to use this method, the
+   * StreamWriter must have been created with an Arrow schema.
+   * @return the append response wrapped in a future.
+   */
+  public ApiFuture<AppendRowsResponse> append(ArrowRecordBatch recordBatch) {
+    return append(recordBatch, -1);
+  }
+
+  /**
+   * Schedules the writing of rows at the end of the current stream.
+   *
+   * @param rows the rows in serialized format to write to BigQuery.
+   * @return the append response wrapped in a future.
+   */
+  public ApiFuture<AppendRowsResponse> append(ProtoRows rows) {
+    return append(rows, -1);
+  }
+
+  /**
+   * Schedules the writing of rows at the given offset.
+   *
<p>
Example of writing rows with a specific offset.
+   *
+   * <pre>{@code
+   * ApiFuture<AppendRowsResponse> future = writer.append(rows, 0);
+   * ApiFutures.addCallback(future, new ApiFutureCallback<AppendRowsResponse>() {
+   *   public void onSuccess(AppendRowsResponse response) {
+   *     if (!response.hasError()) {
+   *       System.out.println("written with offset: " + response.getAppendResult().getOffset());
+   *     } else {
+   *       System.out.println("received an in stream error: " + response.getError().toString());
+   *     }
+   *   }
+   *
+   *   public void onFailure(Throwable t) {
+   *     System.out.println("failed to write: " + t);
+   *   }
+   * }, MoreExecutors.directExecutor());
+   * }</pre>
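+   *
+   * <p>As a shorter sketch (the callback above remains the recommended pattern), the returned
+   * future can also be awaited synchronously; {@code get()} rethrows an in-stream failure as an
+   * ExecutionException:
+   *
+   * <pre>{@code
+   * AppendRowsResponse response = writer.append(rows, 0).get();
+   * System.out.println("written with offset: " + response.getAppendResult().getOffset());
+   * }</pre>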
+   *
+   * @param rows the rows in serialized format to write to BigQuery.
+   * @param offset the offset of the first row. Provide -1 to write at the current end of stream.
+   * @return the append response wrapped in a future.
+   */
+  public ApiFuture<AppendRowsResponse> append(ProtoRows rows, long offset) {
+    return append(AppendRowsData.of(rows), offset);
+  }
+
+  /**
+   * Schedules the writing of an Arrow record batch at the given offset. Since the StreamWriter
+   * doesn't know how many rows are in the batch, the OpenTelemetry row count metric will report 0
+   * rows for the append. Please use the append method overload that accepts
+   * org.apache.arrow.vector.ipc.message.ArrowRecordBatch if the OpenTelemetry row count is
+   * required. Arrow schema is required to be set for the StreamWriter to use this method.
+   *
<p>
Example of writing an Arrow record batch with a specific offset.
+   *
+   * <pre>{@code
+   * ApiFuture<AppendRowsResponse> future = writer.append(recordBatch, 0);
+   * ApiFutures.addCallback(future, new ApiFutureCallback<AppendRowsResponse>() {
+   *   public void onSuccess(AppendRowsResponse response) {
+   *     if (!response.hasError()) {
+   *       System.out.println("written with offset: " + response.getAppendResult().getOffset());
+   *     } else {
+   *       System.out.println("received an in stream error: " + response.getError().toString());
+   *     }
+   *   }
+   *
+   *   public void onFailure(Throwable t) {
+   *     System.out.println("failed to write: " + t);
+   *   }
+   * }, MoreExecutors.directExecutor());
+   * }</pre>
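+   *
+   * <p>A rough sketch of producing the serialized input, assuming {@code arrowBatch} is an
+   * org.apache.arrow.vector.ipc.message.ArrowRecordBatch; this mirrors what the unserialized
+   * overload below does internally:
+   *
+   * <pre>{@code
+   * ByteArrayOutputStream out = new ByteArrayOutputStream();
+   * MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), arrowBatch);
+   * ArrowRecordBatch recordBatch =
+   *     ArrowRecordBatch.newBuilder()
+   *         .setSerializedRecordBatch(ByteString.copyFrom(out.toByteArray()))
+   *         .build();
+   * }</pre>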
+   *
+   * @param recordBatch the ArrowRecordBatch in serialized format to write to BigQuery.
+   * @param offset the offset of the first row. Provide -1 to write at the current end of stream.
+   * @return the append response wrapped in a future.
+   */
+  public ApiFuture<AppendRowsResponse> append(ArrowRecordBatch recordBatch, long offset) {
+    return append(recordBatch, offset, -1);
+  }
+
+  private ApiFuture<AppendRowsResponse> append(
+      ArrowRecordBatch recordBatch, long offset, long recordBatchRowCount) {
+    return append(AppendRowsData.of(recordBatch, recordBatchRowCount), offset);
+  }
+
+  /**
+   * Schedules the writing of an Arrow record batch at the end of the current stream. Arrow schema
+   * is required to be set for the StreamWriter to use this method.
+   *
+   * @param recordBatch the Arrow record batch to write to BigQuery.
+   *
<p>
Since the serialized Arrow record batch doesn't contain the schema, to use this method, the
+   * StreamWriter must have been created with an Arrow schema. The ArrowRecordBatch will be closed
+   * after it is serialized.
+   * @return the append response wrapped in a future.
+   */
+  public ApiFuture<AppendRowsResponse> append(
+      org.apache.arrow.vector.ipc.message.ArrowRecordBatch recordBatch) {
+    return append(recordBatch, -1);
+  }
+
+  /**
+   * Schedules the writing of an Arrow record batch at the given offset. Arrow schema is required
+   * to be set for the StreamWriter to use this method.
+   *
+   * @param recordBatch the Arrow record batch to write to BigQuery.
+   * @param offset the offset of the first row. Provide -1 to write at the current end of stream.
+   *
<p>
The ArrowRecordBatch will be closed after it is serialized. + * @return the append response wrapped in a future. + */ + public ApiFuture append( + org.apache.arrow.vector.ipc.message.ArrowRecordBatch recordBatch, long offset) { + Preconditions.checkNotNull(recordBatch); + if (writerSchema.format() != AppendFormats.DataFormat.ARROW) { + throw new IllegalStateException( + "The StreamWriter must be created with Arrow schema to append Arrow data."); + } + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), recordBatch); + return append( + ArrowRecordBatch.newBuilder() + .setSerializedRecordBatch(ByteString.copyFrom(out.toByteArray())) + .build(), + offset, + recordBatch.getLength()); + } catch (IOException e) { + throw new StatusRuntimeException( + Status.INVALID_ARGUMENT + .withDescription("Failed to serialize arrow record batch.") + .withCause(e)); + } finally { + recordBatch.close(); + } + } + + private ApiFuture append(AppendRowsData rows, long offset) { + String requestUniqueId = generateRequestUniqueId(); + requestProfilerHook.startOperation( + RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId); + try { + return appendWithUniqueId(rows, offset, requestUniqueId); + } catch (Exception ex) { + requestProfilerHook.endOperation( + RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId); + throw ex; + } + } + + ApiFuture appendWithUniqueId( + ProtoRows rows, long offset, String requestUniqueId) { + return appendWithUniqueId(AppendRowsData.of(rows), offset, requestUniqueId); + } + + ApiFuture appendWithUniqueId( + AppendRowsData rows, long offset, String requestUniqueId) { + if (userClosed.get()) { + AppendRequestAndResponse requestWrapper = + new AppendRequestAndResponse( + AppendRowsRequest.newBuilder().build(), + /* streamWriter= */ this, + /* retrySettings= */ null, + requestUniqueId, + rows.recordBatchRowCount()); + requestWrapper.appendResult.setException( + new Exceptions.StreamWriterClosedException( + Status.fromCode(Status.Code.FAILED_PRECONDITION) + .withDescription("User closed StreamWriter"), + streamName, + getWriterId())); + requestProfilerHook.endOperation( + RequestProfiler.OperationName.TOTAL_LATENCY, requestUniqueId); + return requestWrapper.appendResult; + } + return this.singleConnectionOrConnectionPool.append(this, rows, offset, requestUniqueId); + } + + @VisibleForTesting + Attributes getTelemetryAttributes() { + return this.singleConnectionOrConnectionPool.getTelemetryAttributes(this); + } + + /** + * Returns the wait of a request in Client side before sending to the Server. Request could wait + * in Client because it reached the client side inflight request limit (adjustable when + * constructing the StreamWriter). The value is the wait time for the last sent request. A + * constant high wait value indicates a need for more throughput, you can create a new Stream for + * to increase the throughput in exclusive stream case, or create a new Writer in the default + * stream case. + */ + public long getInflightWaitSeconds() { + return singleConnectionOrConnectionPool.getInflightWaitSeconds(this); + } + + /** + * @return a unique Id for the writer. + */ + public String getWriterId() { + return singleConnectionOrConnectionPool.getWriterId(writerId); + } + + /** + * @return name of the Stream that this writer is working on. + */ + public String getStreamName() { + return streamName; + } + + /** + * @return the passed in user schema. 
+ */ + /** {@return the user provided schema in a general AppendRowsSchema} */ + AppendRowsSchema getWriterSchema() { + return writerSchema; + } + + /** {@return the passed in Proto user schema} */ + public ProtoSchema getProtoSchema() { + if (writerSchema.format() == AppendFormats.DataFormat.PROTO) { + return writerSchema.protoSchema(); + } else { + throw new IllegalStateException("No Proto schema found."); + } + } + + /** {@return the passed in Arrow user schema} */ + public ArrowSchema getArrowSchema() { + if (writerSchema.format() == AppendFormats.DataFormat.ARROW) { + return writerSchema.arrowSchema(); + } else { + throw new IllegalStateException("No Arrow schema found."); + } + } + + /** + * @return the location of the destination. + */ + public String getLocation() { + return location; + } + + /** + * @return the missing value interpretation map used for the writer. + */ + public Map + getMissingValueInterpretationMap() { + return missingValueInterpretationMap; + } + + /** + * @return if a stream writer can no longer be used for writing. It is due to either the + * StreamWriter is explicitly closed or the underlying connection is broken when connection + * pool is not used. Client should recreate StreamWriter in this case. + */ + public boolean isClosed() { + if (singleConnectionOrConnectionPool.getKind() == Kind.CONNECTION_WORKER) { + return userClosed.get() + || singleConnectionOrConnectionPool.connectionWorker().isConnectionInUnrecoverableState(); + } else { + // With ConnectionPool, we will replace the bad connection automatically. + return userClosed.get(); + } + } + + /** + * @return if user explicitly closed the writer. + */ + public boolean isUserClosed() { + return userClosed.get(); + } + + /** Close the stream writer. Shut down all resources. */ + @Override + public void close() { + userClosed.set(true); + singleConnectionOrConnectionPool.close(this); + } + + /** Constructs a new {@link StreamWriter.Builder} using the given stream and client. */ + public static StreamWriter.Builder newBuilder(String streamName, BigQueryWriteClient client) { + return new StreamWriter.Builder(streamName, client); + } + + /** Constructs a new {@link StreamWriter.Builder} using the given stream. */ + public static StreamWriter.Builder newBuilder(String streamName) { + return new StreamWriter.Builder(streamName); + } + + /** + * Thread-safe getter of updated TableSchema. + * + *
<p>
This will return the updated schema only when the creation timestamp of this writer is older + * than the updated schema. + */ + public synchronized TableSchema getUpdatedSchema() { + TableSchemaAndTimestamp tableSchemaAndTimestamp = + singleConnectionOrConnectionPool.getUpdatedSchema(this); + if (tableSchemaAndTimestamp == null) { + return null; + } + return creationTimestamp < tableSchemaAndTimestamp.updateTimeStamp() + ? tableSchemaAndTimestamp.updatedSchema() + : null; + } + + /** + * Sets the maximum time a request is allowed to be waiting in request waiting queue. Under very + * low chance, it's possible for append request to be waiting indefintely for request callback + * when Google networking SDK does not detect the networking breakage. The default timeout is 15 + * minutes. We are investigating the root cause for callback not triggered by networking SDK. + */ + public static void setMaxRequestCallbackWaitTime(Duration waitTime) { + ConnectionWorker.MAXIMUM_REQUEST_CALLBACK_WAIT_TIME = waitTime; + } + + /** + * @return the default stream name associated with tableName + */ + public static String getDefaultStreamName(TableName tableName) { + return tableName + defaultStreamMatching; + } + + long getCreationTimestamp() { + return creationTimestamp; + } + + @VisibleForTesting + SingleConnectionOrConnectionPool.Kind getConnectionOperationType() { + return singleConnectionOrConnectionPool.getKind(); + } + + @VisibleForTesting + static int getTestOnlyClientCreatedTimes() { + return testOnlyClientCreatedTimes; + } + + @VisibleForTesting + static void cleanUp() { + testOnlyClientCreatedTimes = 0; + connectionPoolMap.clear(); + } + + @VisibleForTesting + ConnectionWorkerPool getTestOnlyConnectionWorkerPool() { + ConnectionWorkerPool connectionWorkerPool = null; + for (Entry entry : connectionPoolMap.entrySet()) { + connectionWorkerPool = entry.getValue(); + } + return connectionWorkerPool; + } + + @VisibleForTesting + Map getTestOnlyConnectionPoolMap() { + return connectionPoolMap; + } + + // A method to clear the static connection pool to avoid making pool visible to other tests. + @VisibleForTesting + static void clearConnectionPool() { + connectionPoolMap.clear(); + } + + /** A builder of {@link StreamWriter}s. */ + public static final class Builder { + private static final long DEFAULT_MAX_INFLIGHT_REQUESTS = 1000L; + + private static final long DEFAULT_MAX_INFLIGHT_BYTES = 100 * 1024 * 1024; // 100Mb. + + private String streamName; + + private BigQueryWriteClient client; + + private AppendRowsSchema writerSchema = null; + + private long maxInflightRequest = DEFAULT_MAX_INFLIGHT_REQUESTS; + + private long maxInflightBytes = DEFAULT_MAX_INFLIGHT_BYTES; + + private String endpoint = null; + + private TransportChannelProvider channelProvider = null; + + private CredentialsProvider credentialsProvider = null; + + private ExecutorProvider executorProvider = null; + + private FlowController.LimitExceededBehavior limitExceededBehavior = + FlowController.LimitExceededBehavior.Block; + + private String traceId = null; + + private String clientId = "java-streamwriter"; + + private TableSchema updatedTableSchema = null; + + private String location = null; + + private boolean enableConnectionPool = false; + + private java.time.Duration maxRetryDuration = Duration.ofMinutes(5); + + private String compressorName = null; + + // Default missing value interpretation value. 
+ private AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation = + MissingValueInterpretation.MISSING_VALUE_INTERPRETATION_UNSPECIFIED; + + private Map + missingValueInterpretationMap = new HashMap(); + + private boolean enableRequestProfiler = false; + private boolean enableOpenTelemetry = false; + + private RetrySettings retrySettings = null; + + private Builder(String streamName) { + this.streamName = Preconditions.checkNotNull(streamName); + this.client = null; + } + + private Builder(String streamName, BigQueryWriteClient client) { + this.streamName = Preconditions.checkNotNull(streamName); + this.client = Preconditions.checkNotNull(client); + } + + /** Sets the user provided proto schema of the rows. */ + @CanIgnoreReturnValue + public Builder setWriterSchema(ProtoSchema protoSchema) { + this.writerSchema = AppendRowsSchema.of(protoSchema); + return this; + } + + /** Sets the user provided serialized Arrow schema of the rows. */ + @CanIgnoreReturnValue + public Builder setWriterSchema(ArrowSchema arrowSchema) { + this.writerSchema = AppendRowsSchema.of(arrowSchema); + return this; + } + + /** Sets the user provided unserialized Arrow schema of the rows. */ + @CanIgnoreReturnValue + public Builder setWriterSchema(Schema arrowSchema) { + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), arrowSchema); + this.writerSchema = + AppendRowsSchema.of( + ArrowSchema.newBuilder() + .setSerializedSchema(ByteString.copyFrom(out.toByteArray())) + .build()); + } catch (IOException e) { + throw new StatusRuntimeException( + Status.INVALID_ARGUMENT.withDescription("Failed to serialize arrow schema.")); + } + return this; + } + + public Builder setMaxInflightRequests(long value) { + this.maxInflightRequest = value; + return this; + } + + public Builder setMaxInflightBytes(long value) { + this.maxInflightBytes = value; + return this; + } + + /** Gives the ability to override the gRPC endpoint. */ + public Builder setEndpoint(String endpoint) { + this.endpoint = Preconditions.checkNotNull(endpoint, "Endpoint is null."); + return this; + } + + /** + * Enables a static shared bidi-streaming connection pool that would dynamically scale up + * connections based on backlog within each individual connection. A single table's traffic + * might be splitted into multiple connections if needed. Different tables' traffic can also be + * multiplexed within the same connection. + * + *
<p>
+     * Each connection pool has an upper limit (default 20) and a lower limit (default 2) on the
+     * number of active connections. These limits can be tuned via a static method exposed on
+     * {@link ConnectionWorkerPool}:
+     *
+     * <pre>{@code
+     * ConnectionWorkerPool.setOptions(
+     *     Settings.builder().setMinConnectionsPerRegion(4).setMaxConnectionsPerRegion(10).build());
+     * }</pre>
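+     *
+     * <p>A brief sketch of a writer with the pool enabled (the table name and schema here are
+     * illustrative); note the pool is only supported for the default stream:
+     *
+     * <pre>{@code
+     * StreamWriter writer =
+     *     StreamWriter.newBuilder(StreamWriter.getDefaultStreamName(tableName))
+     *         .setWriterSchema(protoSchema)
+     *         .setLocation("us")
+     *         .setEnableConnectionPool(true)
+     *         .build();
+     * }</pre>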
+     *
+     * @param enableConnectionPool whether the shared connection pool should be used
+     * @return this Builder
+     */
+    public Builder setEnableConnectionPool(boolean enableConnectionPool) {
+      this.enableConnectionPool = enableConnectionPool;
+      return this;
+    }
+
+    /**
+     * {@code ChannelProvider} to use to create Channels, which must point at the Cloud BigQuery
+     * Storage API endpoint.
+     *
<p>
For performance, this client benefits from having multiple underlying connections. See + * {@link com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.Builder#setPoolSize(int)}. + */ + public Builder setChannelProvider(TransportChannelProvider channelProvider) { + this.channelProvider = + Preconditions.checkNotNull(channelProvider, "ChannelProvider is null."); + return this; + } + + /** {@code CredentialsProvider} to use to create Credentials to authenticate calls. */ + public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) { + this.credentialsProvider = + Preconditions.checkNotNull(credentialsProvider, "CredentialsProvider is null."); + return this; + } + + /** {@code ExecutorProvider} to use to create Executor to run background jobs. */ + public Builder setExecutorProvider(ExecutorProvider executorProvider) { + this.executorProvider = + Preconditions.checkNotNull(executorProvider, "ExecutorProvider is null."); + return this; + } + + /** + * Sets traceId for debuging purpose. TraceId must follow the format of + * CustomerDomain:DebugString, e.g. DATAFLOW:job_id_x. + */ + public Builder setTraceId(String traceId) { + int colonIndex = traceId.indexOf(':'); + if (colonIndex == -1 || colonIndex == 0 || colonIndex == traceId.length() - 1) { + throw new IllegalArgumentException( + "TraceId must follow the format of A:B. Actual:" + traceId); + } + this.traceId = traceId; + return this; + } + + /** + * Sets the client id of the writer, for example, JsonStreamWriter has the client id of + * "java-jsonwriter". + */ + Builder setClientId(String clientId) { + this.clientId = clientId; + return this; + } + + /** Location of the table this stream writer is targeting. */ + public Builder setLocation(String location) { + this.location = location; + return this; + } + + /** + * Sets the limit exceeded behavior. + * + * @param limitExceededBehavior + * @return + */ + public Builder setLimitExceededBehavior( + FlowController.LimitExceededBehavior limitExceededBehavior) throws StatusRuntimeException { + if (limitExceededBehavior == FlowController.LimitExceededBehavior.Ignore) { + throw new StatusRuntimeException( + Status.fromCode(Code.INVALID_ARGUMENT) + .withDescription("LimitExceededBehavior.Ignore is not supported on StreamWriter.")); + } + this.limitExceededBehavior = limitExceededBehavior; + return this; + } + + /* + * Max duration to retry on retryable errors. Default is 5 minutes. You can allow unlimited + * retry by setting the value to be 0. + */ + public Builder setMaxRetryDuration(java.time.Duration maxRetryDuration) { + this.maxRetryDuration = maxRetryDuration; + return this; + } + + public Builder setCompressorName(String compressorName) { + Preconditions.checkNotNull(compressorName); + Preconditions.checkArgument( + compressorName.equals("gzip"), + "Compression of type \"%s\" isn't supported, only \"gzip\" compression is supported.", + compressorName); + this.compressorName = compressorName; + return this; + } + + /** + * Sets the default missing value interpretation value if the column is not presented in the + * missing_value_interpretations map. + */ + public Builder setDefaultMissingValueInterpretation( + AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation) { + this.defaultMissingValueInterpretation = defaultMissingValueInterpretation; + return this; + } + + /** + * Sets the missing value interpretation map for the stream writer. The input + * missingValueInterpretationMap is used for all write requests unless otherwise changed. 
+     *
+     * @param missingValueInterpretationMap the missing value interpretation map used by the
+     *     stream writer.
+     * @return this Builder
+     */
+    public Builder setMissingValueInterpretationMap(
+        Map<String, AppendRowsRequest.MissingValueInterpretation> missingValueInterpretationMap) {
+      this.missingValueInterpretationMap = missingValueInterpretationMap;
+      return this;
+    }
+
+    /**
+     * Enables a latency profiler that periodically generates a detailed latency report for the
+     * top-latency requests. This is currently an experimental API.
+     */
+    public Builder setEnableLatencyProfiler(boolean enableLatencyProfiler) {
+      this.enableRequestProfiler = enableLatencyProfiler;
+      return this;
+    }
+
+    /** Enables generation of metrics for OpenTelemetry. */
+    public Builder setEnableOpenTelemetry(boolean enableOpenTelemetry) {
+      this.enableOpenTelemetry = enableOpenTelemetry;
+      return this;
+    }
+
+    /**
+     * Enables client-library automatic retries on request-level errors.
+     *
+     * <pre>
+     * Immediate Retry codes:
+     * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+     * Backoff Retry code:
+     * RESOURCE_EXHAUSTED
+     *
+     * Example:
+     * RetrySettings retrySettings = RetrySettings.newBuilder()
+     *      .setInitialRetryDelay(Duration.ofMillis(500)) // applies to backoff retry
+     *      .setRetryDelayMultiplier(1.1) // applies to backoff retry
+     *      .setMaxAttempts(5) // applies to both retries
+     *      .setMaxRetryDelay(Duration.ofMinutes(1)) // applies to backoff retry .build();
+     * </pre>
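+     *
+     * A brief sketch of applying these settings when building a writer (streamName, protoSchema,
+     * and the retrySettings above are assumed to be defined):
+     *
+     * <pre>{@code
+     * StreamWriter writer =
+     *     StreamWriter.newBuilder(streamName)
+     *         .setWriterSchema(protoSchema)
+     *         .setRetrySettings(retrySettings)
+     *         .build();
+     * }</pre>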
+ * + * @param retrySettings + * @return + */ + public Builder setRetrySettings(RetrySettings retrySettings) { + this.retrySettings = retrySettings; + return this; + } + + /** Builds the {@code StreamWriterV2}. */ + public StreamWriter build() throws IOException { + return new StreamWriter(this); + } + + String getFullTraceId() { + String clientWithVersion = + GaxProperties.getLibraryVersion(StreamWriter.class).isEmpty() + ? clientId + : clientId + ":" + GaxProperties.getLibraryVersion(StreamWriter.class); + if (traceId == null || traceId.isEmpty()) { + return clientWithVersion; + } else { + return clientWithVersion + " " + traceId; + } + } + } + + private String generateRequestUniqueId() { + return getStreamName() + "-" + UUID.randomUUID().toString(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/TelemetryMetrics.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/TelemetryMetrics.java new file mode 100644 index 000000000000..de7c72f017fd --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/TelemetryMetrics.java @@ -0,0 +1,266 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Logger; + +/** + * Enables export of metrics to OpenTelemetry. Since it keeps track of whether metrics are + * enabled/disabled, it simplifies calling code by removing the need for tedious if clauses to check + * whether metrics are enabled/disabled. Also, GlobalOpenTelemetry.get() should only be called after + * client application has already installed the necessary meter provider. 
+ */ +public class TelemetryMetrics { + private static final Logger log = Logger.getLogger(TelemetryMetrics.class.getName()); + + private final ConnectionWorker connectionWorker; + private final boolean enableMetrics; + + private Meter writeMeter; + + private static final class OpenTelemetryMetrics { + private LongCounter instrumentAckedRequestCount; + private LongCounter instrumentAckedRequestSize; + private LongCounter instrumentAckedRequestRows; + private LongHistogram instrumentNetworkResponseLatency; + private LongCounter instrumentConnectionStartCount; + private LongCounter instrumentConnectionEndCount; + } + + private OpenTelemetryMetrics openTelemetryMetrics = new OpenTelemetryMetrics(); + private static final List METRICS_MILLISECONDS_LATENCY_BUCKETS = + ImmutableList.of( + 0L, 17L, 38L, 86L, 195L, 438L, 985L, 2217L, 4988L, 11223L, 25251L, 56815L, 127834L, + 287627L, 647160L); + static AttributeKey telemetryKeyTableId = AttributeKey.stringKey("table_id"); + static AttributeKey telemetryKeyWriterId = AttributeKey.stringKey("writer_id"); + private static String dataflowPrefix = "dataflow:"; + static List> telemetryKeysTraceId = + new ArrayList>() { + { + add(AttributeKey.stringKey("trace_field_1")); + add(AttributeKey.stringKey("trace_field_2")); + add(AttributeKey.stringKey("trace_field_3")); + } + }; + static AttributeKey telemetryKeyErrorCode = AttributeKey.stringKey("error_code"); + static AttributeKey telemetryKeyIsRetry = AttributeKey.stringKey("is_retry"); + private Attributes telemetryAttributes; + + private void setTraceIdAttributesPart( + AttributesBuilder builder, + String[] traceIdParts, + int indexPartsToCheck, + int indexTelemetryKeysToUse) { + if ((indexPartsToCheck < traceIdParts.length) && !traceIdParts[indexPartsToCheck].isEmpty()) { + builder.put( + telemetryKeysTraceId.get(indexTelemetryKeysToUse), traceIdParts[indexPartsToCheck]); + } + } + + private void setTraceIdAttributes(AttributesBuilder builder, String traceId) { + if ((traceId != null) && !traceId.isEmpty()) { + int indexDataflow = traceId.toLowerCase().indexOf(dataflowPrefix); + if (indexDataflow >= 0) { + String[] traceIdParts = + traceId.substring(indexDataflow + dataflowPrefix.length()).split(":", 8); + setTraceIdAttributesPart(builder, traceIdParts, 0, 0); + setTraceIdAttributesPart(builder, traceIdParts, 1, 1); + setTraceIdAttributesPart(builder, traceIdParts, 2, 2); + } + } + } + + // Specify common attributes for all metrics. + // For example, table name and writer id. + // Metrics dashboards can be filtered on available attributes. + private Attributes buildOpenTelemetryAttributes( + String tableName, String writerId, String traceId) { + AttributesBuilder builder = Attributes.builder(); + if (!tableName.isEmpty()) { + builder.put(telemetryKeyTableId, tableName); + } + builder.put(telemetryKeyWriterId, writerId); + setTraceIdAttributes(builder, traceId); + return builder.build(); + } + + // Build new attributes augmented with an error code string. + private Attributes augmentAttributesWithErrorCode(Attributes attributes, String errorCode) { + AttributesBuilder builder = attributes.toBuilder(); + if ((errorCode != null) && !errorCode.isEmpty()) { + builder.put(telemetryKeyErrorCode, errorCode); + } + return builder.build(); + } + + // Build new attributes augmented with a flag indicating this was a retry. 
+ private Attributes augmentAttributesWithRetry(Attributes attributes) { + AttributesBuilder builder = attributes.toBuilder(); + builder.put(telemetryKeyIsRetry, "1"); + return builder.build(); + } + + @VisibleForTesting + Attributes getTelemetryAttributes() { + return telemetryAttributes; + } + + private void registerOpenTelemetryMetrics(ConnectionWorker connectionWorker) { + MeterProvider meterProvider = Singletons.getOpenTelemetry().getMeterProvider(); + writeMeter = + meterProvider + .meterBuilder("com.google.cloud.bigquery.storage.v1.write") + .setInstrumentationVersion( + ConnectionWorker.class.getPackage().getImplementationVersion()) + .build(); + openTelemetryMetrics.instrumentAckedRequestCount = + writeMeter + .counterBuilder("append_requests_acked") + .setDescription("Counts number of requests acked by the server") + .build(); + openTelemetryMetrics.instrumentAckedRequestSize = + writeMeter + .counterBuilder("append_request_bytes_acked") + .setDescription("Counts byte size of requests acked by the server") + .build(); + openTelemetryMetrics.instrumentAckedRequestRows = + writeMeter + .counterBuilder("append_rows_acked") + .setDescription("Counts number of request rows acked by the server") + .build(); + writeMeter + .gaugeBuilder("active_connection_count") + .ofLongs() + .setDescription("Reports number of active connections") + .buildWithCallback( + measurement -> { + int count = connectionWorker.hasActiveConnection() ? 1 : 0; + measurement.record(count, getTelemetryAttributes()); + }); + writeMeter + .gaugeBuilder("inflight_queue_length") + .ofLongs() + .setDescription( + "Reports length of inflight queue. This queue contains sent append requests waiting for" + + " response from the server.") + .buildWithCallback( + measurement -> { + int length = connectionWorker.getInflightRequestQueueLength(); + measurement.record(length, getTelemetryAttributes()); + }); + openTelemetryMetrics.instrumentNetworkResponseLatency = + writeMeter + .histogramBuilder("network_response_latency") + .ofLongs() + .setDescription( + "Reports time taken in milliseconds for a response to arrive once a message has" + + " been sent over the network.") + .setExplicitBucketBoundariesAdvice(METRICS_MILLISECONDS_LATENCY_BUCKETS) + .build(); + openTelemetryMetrics.instrumentConnectionStartCount = + writeMeter + .counterBuilder("connection_start_count") + .setDescription( + "Counts number of connection attempts made, regardless of whether these are initial" + + " or retry.") + .build(); + openTelemetryMetrics.instrumentConnectionEndCount = + writeMeter + .counterBuilder("connection_end_count") + .setDescription("Counts number of connection end events.") + .build(); + } + + TelemetryMetrics( + ConnectionWorker connectionWorker, + boolean enableMetrics, + String tableName, + String writerId, + String traceId) { + this.connectionWorker = connectionWorker; + this.enableMetrics = enableMetrics; + if (enableMetrics) { + this.telemetryAttributes = buildOpenTelemetryAttributes(tableName, writerId, traceId); + registerOpenTelemetryMetrics(connectionWorker); + } + } + + // Refresh the table name attribute when multiplexing switches between tables. 
+ public void refreshOpenTelemetryTableNameAttributes(String tableName) { + if (enableMetrics) { + if (!tableName.isEmpty() + && !tableName.equals(getTelemetryAttributes().get(telemetryKeyTableId))) { + AttributesBuilder builder = getTelemetryAttributes().toBuilder(); + builder.put(telemetryKeyTableId, tableName); + this.telemetryAttributes = builder.build(); + } + } + } + + public void recordConnectionStart() { + if (enableMetrics) { + openTelemetryMetrics.instrumentConnectionStartCount.add(1, getTelemetryAttributes()); + } + } + + public void recordConnectionStartWithRetry() { + if (enableMetrics) { + openTelemetryMetrics.instrumentConnectionStartCount.add( + 1, augmentAttributesWithRetry(getTelemetryAttributes())); + } + } + + public void recordConnectionEnd(String errorCode) { + if (enableMetrics) { + openTelemetryMetrics.instrumentConnectionEndCount.add( + 1, augmentAttributesWithErrorCode(getTelemetryAttributes(), errorCode)); + } + } + + public void recordNetworkLatency(Duration latency) { + if (enableMetrics) { + openTelemetryMetrics.instrumentNetworkResponseLatency.record( + latency.toMillis(), getTelemetryAttributes()); + } + } + + public void recordResponse(long messageSize, long rowCount, String errorCode, boolean isRetry) { + if (enableMetrics) { + Attributes augmentedTelemetryAttributes = + augmentAttributesWithErrorCode(getTelemetryAttributes(), errorCode); + if (isRetry) { + augmentedTelemetryAttributes = augmentAttributesWithRetry(augmentedTelemetryAttributes); + } + openTelemetryMetrics.instrumentAckedRequestCount.add(1, augmentedTelemetryAttributes); + openTelemetryMetrics.instrumentAckedRequestSize.add( + messageSize, augmentedTelemetryAttributes); + openTelemetryMetrics.instrumentAckedRequestRows.add(rowCount, augmentedTelemetryAttributes); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java new file mode 100644 index 000000000000..b215fcf13ec2 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import com.google.protobuf.Descriptors; +import com.google.protobuf.DynamicMessage; +import java.util.List; + +public interface ToProtoConverter { + List convertToProtoMessage( + Descriptors.Descriptor protoSchema, + TableSchema tableSchema, + Iterable inputObject, + boolean ignoreUnknownFields); +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json new file mode 100644 index 000000000000..e4541a4a76b2 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/gapic_metadata.json @@ -0,0 +1,54 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.cloud.bigquery.storage.v1", + "libraryPackage": "com.google.cloud.bigquery.storage.v1", + "services": { + "BigQueryRead": { + "clients": { + "grpc": { + "libraryClient": "BaseBigQueryReadClient", + "rpcs": { + "CreateReadSession": { + "methods": ["createReadSession", "createReadSession", "createReadSession", "createReadSessionCallable"] + }, + "ReadRows": { + "methods": ["readRowsCallable"] + }, + "SplitReadStream": { + "methods": ["splitReadStream", "splitReadStreamCallable"] + } + } + } + } + }, + "BigQueryWrite": { + "clients": { + "grpc": { + "libraryClient": "BigQueryWriteClient", + "rpcs": { + "AppendRows": { + "methods": ["appendRowsCallable"] + }, + "BatchCommitWriteStreams": { + "methods": ["batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreamsCallable"] + }, + "CreateWriteStream": { + "methods": ["createWriteStream", "createWriteStream", "createWriteStream", "createWriteStreamCallable"] + }, + "FinalizeWriteStream": { + "methods": ["finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStreamCallable"] + }, + "FlushRows": { + "methods": ["flushRows", "flushRows", "flushRows", "flushRowsCallable"] + }, + "GetWriteStream": { + "methods": ["getWriteStream", "getWriteStream", "getWriteStream", "getWriteStreamCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java new file mode 100644 index 000000000000..6120aaef9d92 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java @@ -0,0 +1,72 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigQuery Storage API + * + *
<p>
The interfaces provided are listed below, along with usage samples. + * + *
<p>
======================= BaseBigQueryReadClient ======================= + * + *
<p>
Service Description: BigQuery Read API. + * + *
<p>
The Read API can be used to read data from BigQuery. + * + *
<p>
Sample for BaseBigQueryReadClient:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 940837515;
+ *   ReadSession response =
+ *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * }</pre>
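+ *
+ * <p>Continuing that sample (a sketch; {@code response} and the client come from the try block
+ * above), rows for a returned stream can then be fetched with the server-streaming callable:
+ *
+ * <pre>{@code
+ * ReadRowsRequest request =
+ *     ReadRowsRequest.newBuilder().setReadStream(response.getStreams(0).getName()).build();
+ * for (ReadRowsResponse rowsResponse : baseBigQueryReadClient.readRowsCallable().call(request)) {
+ *   // Consume rowsResponse.getAvroRows() or rowsResponse.getArrowRecordBatch().
+ * }
+ * }</pre>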
+ * + *
<p>
======================= BigQueryWriteClient ======================= + * + *
<p>
Service Description: BigQuery Write API. + * + *
<p>
The Write API can be used to write data to BigQuery. + * + *
<p>
For supplementary information about the Write API, see: + * https://cloud.google.com/bigquery/docs/write-api + * + *
<p>
Sample for BigQueryWriteClient:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream writeStream = WriteStream.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+ * }
+ * }</pre>
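+ *
+ * <p>AppendRows is exposed only as a bidi-streaming callable ({@code appendRowsCallable()});
+ * rather than driving that stream by hand, most applications can use the higher-level {@code
+ * StreamWriter} in this package, sketched here with an illustrative table name, proto schema, and
+ * rows:
+ *
+ * <pre>{@code
+ * StreamWriter writer =
+ *     StreamWriter.newBuilder(StreamWriter.getDefaultStreamName(tableName))
+ *         .setWriterSchema(protoSchema)
+ *         .build();
+ * ApiFuture<AppendRowsResponse> future = writer.append(protoRows);
+ * }</pre>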
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.bigquery.storage.v1; + +import javax.annotation.Generated; diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java new file mode 100644 index 000000000000..4b5f2d7161bd --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the BigQueryRead service API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class BigQueryReadStub implements BackgroundResource { + + public UnaryCallable createReadSessionCallable() { + throw new UnsupportedOperationException("Not implemented: createReadSessionCallable()"); + } + + public ServerStreamingCallable readRowsCallable() { + throw new UnsupportedOperationException("Not implemented: readRowsCallable()"); + } + + public UnaryCallable splitReadStreamCallable() { + throw new UnsupportedOperationException("Not implemented: splitReadStreamCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java new file mode 100644 index 000000000000..eb68149d7609 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java @@ -0,0 +1,387 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryReadStub}. + * + *
<p>
The default instance has everything set to sensible defaults: + * + *
<ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
<p>
The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createReadSession: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryReadStubSettings.Builder baseBigQueryReadSettingsBuilder =
+ *     BigQueryReadStubSettings.newBuilder();
+ * baseBigQueryReadSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryReadSettingsBuilder
+ *             .createReadSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryReadStubSettings baseBigQueryReadSettings = baseBigQueryReadSettingsBuilder.build();
+ * }</pre>
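+ *
+ * <p>The server-streaming readRows method can be tuned the same way through readRowsSettings();
+ * a sketch under the same builder as above (the maxAttempts value is illustrative):
+ *
+ * <pre>{@code
+ * baseBigQueryReadSettingsBuilder
+ *     .readRowsSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryReadSettingsBuilder
+ *             .readRowsSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setMaxAttempts(10)
+ *             .build());
+ * }</pre>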
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class BigQueryReadStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createReadSessionSettings; + private final ServerStreamingCallSettings readRowsSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + public BigQueryReadStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryReadStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryReadStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. 
*/ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryReadStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for BigQueryReadStubSettings. */ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder + readRowsSettings; + private final UnaryCallSettings.Builder + splitReadStreamSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_1_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_2_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build(); + definitions.put("retry_policy_1_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_2_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createReadSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); 
+ readRowsSettings = ServerStreamingCallSettings.newBuilder(); + splitReadStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + initDefaults(this); + } + + protected Builder(BigQueryReadStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createReadSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .readRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .splitReadStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public BigQueryReadStubSettings build() throws IOException { + return new BigQueryReadStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java new file mode 100644 index 000000000000..b962c2afb09e --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java @@ -0,0 +1,72 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the BigQueryWrite service API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class BigQueryWriteStub implements BackgroundResource { + + public UnaryCallable createWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: createWriteStreamCallable()"); + } + + public BidiStreamingCallable appendRowsCallable() { + throw new UnsupportedOperationException("Not implemented: appendRowsCallable()"); + } + + public UnaryCallable getWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: getWriteStreamCallable()"); + } + + public UnaryCallable + finalizeWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: finalizeWriteStreamCallable()"); + } + + public UnaryCallable + batchCommitWriteStreamsCallable() { + throw new UnsupportedOperationException("Not implemented: batchCommitWriteStreamsCallable()"); + } + + public UnaryCallable flushRowsCallable() { + throw new UnsupportedOperationException("Not implemented: flushRowsCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java new file mode 100644 index 000000000000..c8f949cc3ed5 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java @@ -0,0 +1,467 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryWriteStub}. + * + *
<p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
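+ *
+ * <p>Once built, these settings can be turned into a stub directly; createStub() below selects
+ * the gRPC transport and rejects any other. A minimal sketch:
+ *
+ * <pre>{@code
+ * BigQueryWriteStub stub = BigQueryWriteStubSettings.newBuilder().build().createStub();
+ * }</pre>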
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createWriteStream:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteStubSettings.Builder bigQueryWriteSettingsBuilder =
+ *     BigQueryWriteStubSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryWriteStubSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * }</pre>
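+ *
+ * <p>To adjust every unary method at once, the builder exposes applyToAllUnaryMethods; a sketch
+ * (the maxAttempts value is illustrative). Note that this does not reach the bidirectional
+ * appendRows stream, whose StreamingCallSettings carry no retry configuration:
+ *
+ * <pre>{@code
+ * bigQueryWriteSettingsBuilder.applyToAllUnaryMethods(
+ *     callSettingsBuilder -> {
+ *       callSettingsBuilder.setRetrySettings(
+ *           callSettingsBuilder.getRetrySettings().toBuilder().setMaxAttempts(3).build());
+ *       return null;
+ *     });
+ * }</pre>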
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class BigQueryWriteStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/bigquery.insertdata") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createWriteStreamSettings; + private final StreamingCallSettings appendRowsSettings; + private final UnaryCallSettings getWriteStreamSettings; + private final UnaryCallSettings + finalizeWriteStreamSettings; + private final UnaryCallSettings + batchCommitWriteStreamsSettings; + private final UnaryCallSettings flushRowsSettings; + + /** Returns the object with the settings used for calls to createWriteStream. */ + public UnaryCallSettings createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to appendRows. */ + public StreamingCallSettings appendRowsSettings() { + return appendRowsSettings; + } + + /** Returns the object with the settings used for calls to getWriteStream. */ + public UnaryCallSettings getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + /** Returns the object with the settings used for calls to flushRows. */ + public UnaryCallSettings flushRowsSettings() { + return flushRowsSettings; + } + + public BigQueryWriteStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryWriteStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. 
*/ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryWriteStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createWriteStreamSettings = settingsBuilder.createWriteStreamSettings().build(); + appendRowsSettings = settingsBuilder.appendRowsSettings().build(); + getWriteStreamSettings = settingsBuilder.getWriteStreamSettings().build(); + finalizeWriteStreamSettings = settingsBuilder.finalizeWriteStreamSettings().build(); + batchCommitWriteStreamsSettings = settingsBuilder.batchCommitWriteStreamsSettings().build(); + flushRowsSettings = settingsBuilder.flushRowsSettings().build(); + } + + /** Builder for BigQueryWriteStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createWriteStreamSettings; + private final StreamingCallSettings.Builder + appendRowsSettings; + private final UnaryCallSettings.Builder + getWriteStreamSettings; + private final UnaryCallSettings.Builder + finalizeWriteStreamSettings; + private final UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings; + private final UnaryCallSettings.Builder flushRowsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_5_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.RESOURCE_EXHAUSTED))); + definitions.put( + "retry_policy_3_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_4_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.RESOURCE_EXHAUSTED))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(10000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(120000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(1200000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(1200000L)) + .setTotalTimeoutDuration(Duration.ofMillis(1200000L)) + .build(); + definitions.put("retry_policy_5_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build(); + definitions.put("retry_policy_3_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_4_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + appendRowsSettings = StreamingCallSettings.newBuilder(); + getWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + finalizeWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchCommitWriteStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + flushRowsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + 
ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings, + flushRowsSettings); + initDefaults(this); + } + + protected Builder(BigQueryWriteStubSettings settings) { + super(settings); + + createWriteStreamSettings = settings.createWriteStreamSettings.toBuilder(); + appendRowsSettings = settings.appendRowsSettings.toBuilder(); + getWriteStreamSettings = settings.getWriteStreamSettings.toBuilder(); + finalizeWriteStreamSettings = settings.finalizeWriteStreamSettings.toBuilder(); + batchCommitWriteStreamsSettings = settings.batchCommitWriteStreamsSettings.toBuilder(); + flushRowsSettings = settings.flushRowsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings, + flushRowsSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_5_params")); + + builder + .getWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params")); + + builder + .finalizeWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params")); + + builder + .batchCommitWriteStreamsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params")); + + builder + .flushRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createWriteStream. */ + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to appendRows. */ + public StreamingCallSettings.Builder + appendRowsSettings() { + return appendRowsSettings; + } + + /** Returns the builder for the settings used for calls to getWriteStream. */ + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + /** Returns the builder for the settings used for calls to flushRows. */ + public UnaryCallSettings.Builder flushRowsSettings() { + return flushRowsSettings; + } + + @Override + public BigQueryWriteStubSettings build() throws IOException { + return new BigQueryWriteStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStub.java new file mode 100644 index 000000000000..1d85a6aa0d17 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStub.java @@ -0,0 +1,340 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcRawCallableFactory; +import com.google.api.gax.retrying.ExponentialRetryAlgorithm; +import com.google.api.gax.retrying.ScheduledRetryingExecutor; +import com.google.api.gax.retrying.StreamingRetryAlgorithm; +import com.google.api.gax.rpc.Callables; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsExtractor; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.gax.tracing.SpanName; +import com.google.api.gax.tracing.TracedServerStreamingCallable; +import com.google.cloud.bigquery.storage.v1.BigQueryReadGrpc; +import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.Singletons; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1.stub.readrows.ApiResultRetryAlgorithm; +import com.google.cloud.bigquery.storage.v1.stub.readrows.ReadRowsRetryingCallable; +import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.api.trace.TracerProvider; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Enhanced stub class for BigQuery Storage API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +public class EnhancedBigQueryReadStub implements BackgroundResource { + + private static final String TRACING_OUTER_CLIENT_NAME = "BigQueryStorage"; + private final GrpcBigQueryReadStub stub; + private final BigQueryReadStubSettings stubSettings; + private final BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener; + private final ClientContext context; + private boolean enableOpenTelemetryTracing = false; + private Tracer openTelemetryTracer = null; + + public static EnhancedBigQueryReadStub create(EnhancedBigQueryReadStubSettings settings) + throws IOException { + return create(settings, null); + } + + public static EnhancedBigQueryReadStub create( + EnhancedBigQueryReadStubSettings settings, + BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener) + throws IOException { + return create(settings, readRowsRetryAttemptListener, false, null); + } + + public static EnhancedBigQueryReadStub create( + EnhancedBigQueryReadStubSettings settings, + BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener, + boolean enableOpenTelemetryTracing, + TracerProvider openTelemetryTracerProvider) + throws IOException { + // Configure the base settings. + BigQueryReadStubSettings.Builder baseSettingsBuilder = + BigQueryReadStubSettings.newBuilder() + .setUniverseDomain(settings.getUniverseDomain()) + .setTransportChannelProvider(settings.getTransportChannelProvider()) + .setEndpoint(settings.getEndpoint()) + .setHeaderProvider(settings.getHeaderProvider()) + .setCredentialsProvider(settings.getCredentialsProvider()) + .setStreamWatchdogCheckInterval(settings.getStreamWatchdogCheckInterval()) + .setStreamWatchdogProvider(settings.getStreamWatchdogProvider()) + .setBackgroundExecutorProvider(settings.getBackgroundExecutorProvider()); + + baseSettingsBuilder + .createReadSessionSettings() + .setRetryableCodes(settings.createReadSessionSettings().getRetryableCodes()) + .setRetrySettings(settings.createReadSessionSettings().getRetrySettings()); + + baseSettingsBuilder + .readRowsSettings() + .setRetryableCodes(settings.readRowsSettings().getRetryableCodes()) + .setRetrySettings(settings.readRowsSettings().getRetrySettings()) + .setResumptionStrategy(settings.readRowsSettings().getResumptionStrategy()) + .setIdleTimeout(settings.readRowsSettings().getIdleTimeout()); + + baseSettingsBuilder + .splitReadStreamSettings() + .setRetryableCodes(settings.splitReadStreamSettings().getRetryableCodes()) + .setRetrySettings(settings.splitReadStreamSettings().getRetrySettings()); + + BigQueryReadStubSettings baseSettings = baseSettingsBuilder.build(); + ClientContext clientContext = ClientContext.create(baseSettings); + GrpcBigQueryReadStub stub = new GrpcBigQueryReadStub(baseSettings, clientContext); + return new EnhancedBigQueryReadStub( + stub, + baseSettings, + readRowsRetryAttemptListener, + clientContext, + enableOpenTelemetryTracing, + openTelemetryTracerProvider); + } + + @InternalApi("Visible for testing") + EnhancedBigQueryReadStub( + GrpcBigQueryReadStub stub, + BigQueryReadStubSettings stubSettings, + BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener, + ClientContext context, + boolean enableOpenTelemetryTracing, + TracerProvider openTelemetryTracerProvider) { + this.stub = stub; + this.stubSettings = stubSettings; + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + this.context = context; + this.enableOpenTelemetryTracing = 
enableOpenTelemetryTracing; + if (enableOpenTelemetryTracing) { + if (openTelemetryTracerProvider == null) { + this.openTelemetryTracer = + Singletons.getOpenTelemetry() + .getTracerProvider() + .tracerBuilder("com.google.cloud.bigquery.storage.v1.read.stub") + .build(); + } else { + this.openTelemetryTracer = + openTelemetryTracerProvider + .tracerBuilder("com.google.cloud.bigquery.storage.v1.read.stub") + .build(); + } + } + } + + public UnaryCallable createReadSessionCallable() { + Span createReadSessionCallable = null; + if (enableOpenTelemetryTracing) { + createReadSessionCallable = + openTelemetryTracer + .spanBuilder( + "com.google.cloud.bigquery.storage.v1.read.stub.createReadSessionCallable") + .startSpan(); + } + try (Scope createReadSessionCallableScope = + createReadSessionCallable != null ? createReadSessionCallable.makeCurrent() : null) { + return stub.createReadSessionCallable(); + } finally { + if (createReadSessionCallable != null) { + createReadSessionCallable.end(); + } + } + } + + public ServerStreamingCallable readRowsCallable() { + Span readRowsCallable = null; + if (enableOpenTelemetryTracing) { + readRowsCallable = + openTelemetryTracer + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.stub.readRowsCallable") + .startSpan(); + } + try (Scope readRowsCallableScope = + readRowsCallable != null ? readRowsCallable.makeCurrent() : null) { + ServerStreamingCallable innerCallable = + GrpcRawCallableFactory.createServerStreamingCallable( + GrpcCallSettings.newBuilder() + .setMethodDescriptor(BigQueryReadGrpc.getReadRowsMethod()) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(ReadRowsRequest request) { + return ImmutableMap.of( + "read_stream", String.valueOf(request.getReadStream())); + } + }) + .build(), + stubSettings.readRowsSettings().getRetryableCodes()); + ServerStreamingCallSettings callSettings = + stubSettings.readRowsSettings(); + + StreamingRetryAlgorithm retryAlgorithm = + new StreamingRetryAlgorithm<>( + new ApiResultRetryAlgorithm(readRowsRetryAttemptListener), + new ExponentialRetryAlgorithm(callSettings.getRetrySettings(), context.getClock())); + + ScheduledRetryingExecutor retryingExecutor = + new ScheduledRetryingExecutor<>(retryAlgorithm, context.getExecutor()); + + if (context.getStreamWatchdog() != null) { + innerCallable = Callables.watched(innerCallable, callSettings, context); + } + + ReadRowsRetryingCallable outerCallable = + new ReadRowsRetryingCallable( + context.getDefaultCallContext(), + innerCallable, + retryingExecutor, + callSettings.getResumptionStrategy()); + + ServerStreamingCallable traced = + new TracedServerStreamingCallable<>( + outerCallable, + context.getTracerFactory(), + SpanName.of(TRACING_OUTER_CLIENT_NAME, "ReadRows")); + return traced.withDefaultCallContext(context.getDefaultCallContext()); + } finally { + if (readRowsCallable != null) { + readRowsCallable.end(); + } + } + } + + public UnaryCallable splitReadStreamCallable() { + Span splitReadStreamCallable = null; + if (enableOpenTelemetryTracing) { + splitReadStreamCallable = + openTelemetryTracer + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.stub.splitReadStreamCallable") + .startSpan(); + } + try (Scope readRowsCallableScope = + splitReadStreamCallable != null ? 
splitReadStreamCallable.makeCurrent() : null) { + return stub.splitReadStreamCallable(); + } finally { + if (splitReadStreamCallable != null) { + splitReadStreamCallable.end(); + } + } + } + + @Override + public void close() { + Span close = null; + if (enableOpenTelemetryTracing) { + close = + openTelemetryTracer + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.stub.close") + .startSpan(); + } + try (Scope closeScope = close != null ? close.makeCurrent() : null) { + stub.close(); + } finally { + if (close != null) { + close.end(); + } + } + } + + @Override + public void shutdown() { + Span shutdown = null; + if (enableOpenTelemetryTracing) { + shutdown = + openTelemetryTracer + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.stub.shutdown") + .startSpan(); + } + try (Scope shutdownScope = shutdown != null ? shutdown.makeCurrent() : null) { + stub.shutdown(); + } finally { + if (shutdown != null) { + shutdown.end(); + } + } + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + Span shutdownNow = null; + if (enableOpenTelemetryTracing) { + shutdownNow = + openTelemetryTracer + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.stub.shutdownNow") + .startSpan(); + } + try (Scope shutdownNowScope = shutdownNow != null ? shutdownNow.makeCurrent() : null) { + stub.shutdownNow(); + } finally { + if (shutdownNow != null) { + shutdownNow.end(); + } + } + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + Span awaitTermination = null; + if (enableOpenTelemetryTracing) { + awaitTermination = + openTelemetryTracer + .spanBuilder("com.google.cloud.bigquery.storage.v1.read.stub.awaitTermination") + .setAttribute("duration", duration) + .setAttribute("unit", unit.toString()) + .startSpan(); + } + try (Scope awaitTerminationScope = + awaitTermination != null ? awaitTermination.makeCurrent() : null) { + return stub.awaitTermination(duration, unit); + } finally { + if (awaitTermination != null) { + awaitTermination.end(); + } + } + } + + public BigQueryReadStubSettings getStubSettings() { + return stubSettings; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettings.java new file mode 100644 index 000000000000..957c71c01906 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettings.java @@ -0,0 +1,239 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1.BaseBigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1.stub.readrows.ReadRowsResumptionStrategy; +import com.google.common.collect.ImmutableList; +import java.util.List; + +/** + * Settings class to configure an instance of {@link EnhancedBigQueryReadStub}. + * + *
<p>The default instance dynamically reads and applies the default values used by {@link
+ * BigQueryReadStub}.
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of createReadSession to 30 seconds:
+ *
+ * <pre>
+ * <code>
+ * EnhancedBigQueryReadStubSettings.Builder builder =
+ *     EnhancedBigQueryReadStubSettings.newBuilder();
+ * builder.createReadSessionSettings().setRetrySettings(
+ *     builder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * EnhancedBigQueryReadStubSettings settings = builder.build();
+ * </code>
+ * </pre>
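+ *
+ * <p>The finished settings object is what {@link EnhancedBigQueryReadStub#create} consumes; a
+ * minimal sketch, reusing the settings built above:
+ *
+ * <pre>
+ * <code>
+ * EnhancedBigQueryReadStub stub = EnhancedBigQueryReadStub.create(settings);
+ * </code>
+ * </pre>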
+ */ +public class EnhancedBigQueryReadStubSettings + extends StubSettings { + + private final UnaryCallSettings createReadSessionSettings; + private final ServerStreamingCallSettings readRowsSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return BigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryReadStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BaseBigQueryReadSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryReadStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return new Builder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected EnhancedBigQueryReadStubSettings(Builder settingsBuilder) { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for {@link EnhancedBigQueryReadStubSettings}. 
*/ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder + readRowsSettings; + private final UnaryCallSettings.Builder + splitReadStreamSettings; + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + // Defaults provider + BigQueryReadStubSettings.Builder baseDefaults = BigQueryReadStubSettings.newBuilder(); + setTransportChannelProvider(defaultTransportChannelProvider()); + setCredentialsProvider(baseDefaults.getCredentialsProvider()); + setStreamWatchdogCheckInterval(baseDefaults.getStreamWatchdogCheckInterval()); + setStreamWatchdogProvider(baseDefaults.getStreamWatchdogProvider()); + + // Per-method settings using baseSettings for defaults. + createReadSessionSettings = baseDefaults.createReadSessionSettings(); + splitReadStreamSettings = baseDefaults.splitReadStreamSettings(); + + // Per-method settings using override values for defaults. + readRowsSettings = + baseDefaults.readRowsSettings().setResumptionStrategy(new ReadRowsResumptionStrategy()); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + protected Builder(EnhancedBigQueryReadStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public EnhancedBigQueryReadStubSettings build() { + return new EnhancedBigQueryReadStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java new file mode 100644 index 000000000000..3127fbea870a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BigQueryRead service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcBigQueryReadCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java new file mode 100644 index 000000000000..2dba1b3fe42a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java @@ -0,0 +1,231 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; +import com.google.longrunning.stub.GrpcOperationsStub; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BigQueryRead service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcBigQueryReadStub extends BigQueryReadStub { + private static final MethodDescriptor + createReadSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryRead/CreateReadSession") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateReadSessionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadSession.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + readRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryRead/ReadRows") + .setRequestMarshaller(ProtoUtils.marshaller(ReadRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + splitReadStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryRead/SplitReadStream") + .setRequestMarshaller( + ProtoUtils.marshaller(SplitReadStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(SplitReadStreamResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createReadSessionCallable; + private final ServerStreamingCallable readRowsCallable; + private final UnaryCallable + splitReadStreamCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryReadStub create(BigQueryReadStubSettings settings) + throws IOException { + return new GrpcBigQueryReadStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryReadStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryReadStub(BigQueryReadStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryReadStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryReadStub( + BigQueryReadStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryReadStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryReadStub(BigQueryReadStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryReadCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryReadStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcBigQueryReadStub( + BigQueryReadStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createReadSessionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createReadSessionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "read_session.table", String.valueOf(request.getReadSession().getTable())); + return builder.build(); + }) + .build(); + GrpcCallSettings readRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("read_stream", String.valueOf(request.getReadStream())); + return builder.build(); + }) + .build(); + GrpcCallSettings + splitReadStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(splitReadStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + + this.createReadSessionCallable = + callableFactory.createUnaryCallable( + createReadSessionTransportSettings, + settings.createReadSessionSettings(), + clientContext); + this.readRowsCallable = + callableFactory.createServerStreamingCallable( + readRowsTransportSettings, settings.readRowsSettings(), clientContext); + this.splitReadStreamCallable = + callableFactory.createUnaryCallable( + splitReadStreamTransportSettings, settings.splitReadStreamSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createReadSessionCallable() { + return createReadSessionCallable; + } + + @Override + public ServerStreamingCallable readRowsCallable() { + return readRowsCallable; + } + + @Override + public UnaryCallable splitReadStreamCallable() { + return splitReadStreamCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java new file mode 100644 index 000000000000..929eb01577cd --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BigQueryWrite service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcBigQueryWriteCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java new file mode 100644 index 000000000000..b8090f2e281a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java @@ -0,0 +1,338 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.longrunning.stub.GrpcOperationsStub; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BigQueryWrite service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcBigQueryWriteStub extends BigQueryWriteStub { + private static final MethodDescriptor + createWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/CreateWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + appendRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/AppendRows") + .setRequestMarshaller(ProtoUtils.marshaller(AppendRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AppendRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/GetWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(GetWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + finalizeWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1.BigQueryWrite/FinalizeWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1.BigQueryWrite/BatchCommitWriteStreams") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + flushRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1.BigQueryWrite/FlushRows") + .setRequestMarshaller(ProtoUtils.marshaller(FlushRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(FlushRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createWriteStreamCallable; + private final BidiStreamingCallable appendRowsCallable; + private final UnaryCallable getWriteStreamCallable; + private final UnaryCallable + finalizeWriteStreamCallable; + private final UnaryCallable + batchCommitWriteStreamsCallable; + private final UnaryCallable flushRowsCallable; + + private final BackgroundResource backgroundResources; + private final 
GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryWriteStub create(BigQueryWriteStubSettings settings) + throws IOException { + return new GrpcBigQueryWriteStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryWriteStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryWriteStub(BigQueryWriteStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryWriteStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryWriteStub( + BigQueryWriteStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub(BigQueryWriteStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryWriteCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub( + BigQueryWriteStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings appendRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(appendRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("write_stream", String.valueOf(request.getWriteStream())); + return builder.build(); + }) + .build(); + GrpcCallSettings getWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + finalizeWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(finalizeWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchCommitWriteStreamsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(batchCommitWriteStreamsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings flushRowsTransportSettings = + GrpcCallSettings.newBuilder() + 
.setMethodDescriptor(flushRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("write_stream", String.valueOf(request.getWriteStream())); + return builder.build(); + }) + .build(); + + this.createWriteStreamCallable = + callableFactory.createUnaryCallable( + createWriteStreamTransportSettings, + settings.createWriteStreamSettings(), + clientContext); + this.appendRowsCallable = + callableFactory.createBidiStreamingCallable( + appendRowsTransportSettings, settings.appendRowsSettings(), clientContext); + this.getWriteStreamCallable = + callableFactory.createUnaryCallable( + getWriteStreamTransportSettings, settings.getWriteStreamSettings(), clientContext); + this.finalizeWriteStreamCallable = + callableFactory.createUnaryCallable( + finalizeWriteStreamTransportSettings, + settings.finalizeWriteStreamSettings(), + clientContext); + this.batchCommitWriteStreamsCallable = + callableFactory.createUnaryCallable( + batchCommitWriteStreamsTransportSettings, + settings.batchCommitWriteStreamsSettings(), + clientContext); + this.flushRowsCallable = + callableFactory.createUnaryCallable( + flushRowsTransportSettings, settings.flushRowsSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createWriteStreamCallable() { + return createWriteStreamCallable; + } + + @Override + public BidiStreamingCallable appendRowsCallable() { + return appendRowsCallable; + } + + @Override + public UnaryCallable getWriteStreamCallable() { + return getWriteStreamCallable; + } + + @Override + public UnaryCallable + finalizeWriteStreamCallable() { + return finalizeWriteStreamCallable; + } + + @Override + public UnaryCallable + batchCommitWriteStreamsCallable() { + return batchCommitWriteStreamsCallable; + } + + @Override + public UnaryCallable flushRowsCallable() { + return flushRowsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ApiResultRetryAlgorithm.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ApiResultRetryAlgorithm.java new file mode 100644 index 000000000000..046adb90c84e --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ApiResultRetryAlgorithm.java @@ -0,0 +1,89 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub.readrows; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.ApiException; +import com.google.cloud.bigquery.storage.util.Errors; +import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings; +import io.grpc.Metadata; +import io.grpc.Status; +import java.time.Duration; + +/** For internal use, public for technical reasons. */ +@InternalApi +public class ApiResultRetryAlgorithm implements ResultRetryAlgorithm { + // Duration to sleep on if the error is DEADLINE_EXCEEDED. + public static final Duration DEADLINE_SLEEP_DURATION = Duration.ofMillis(1); + + private final BigQueryReadSettings.RetryAttemptListener retryAttemptListener; + + public ApiResultRetryAlgorithm() { + this(null); + } + + public ApiResultRetryAlgorithm(BigQueryReadSettings.RetryAttemptListener retryAttemptListener) { + super(); + this.retryAttemptListener = retryAttemptListener; + } + + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, ResponseT prevResponse, TimedAttemptSettings prevSettings) { + if (prevThrowable != null) { + Status status = Status.fromThrowable(prevThrowable); + Metadata metadata = Status.trailersFromThrowable(prevThrowable); + Errors.IsRetryableStatusResult result = Errors.isRetryableStatus(status, metadata); + if (result.isRetryable) { + // If result.retryDelay isn't null, we know exactly how long we must wait, so both regular + // and randomized delays are the same. 
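+ // When the server gives no delay, keep the nominal backoff from the previous attempt but
+ // pin the actual (randomized) sleep to DEADLINE_SLEEP_DURATION (1 ms), so the stream is
+ // resumed almost immediately rather than waiting out the full backoff.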
+ Duration retryDelay = result.retryDelay; + Duration randomizedRetryDelay = result.retryDelay; + if (retryDelay == null) { + retryDelay = prevSettings.getRetryDelayDuration(); + randomizedRetryDelay = DEADLINE_SLEEP_DURATION; + } + if (retryAttemptListener != null) { + retryAttemptListener.onRetryAttempt(status, metadata); + } + return TimedAttemptSettings.newBuilder() + .setGlobalSettings(prevSettings.getGlobalSettings()) + .setRetryDelayDuration(retryDelay) + .setRpcTimeout(prevSettings.getRpcTimeout()) + .setRandomizedRetryDelayDuration(randomizedRetryDelay) + .setAttemptCount(prevSettings.getAttemptCount() + 1) + .setFirstAttemptStartTimeNanos(prevSettings.getFirstAttemptStartTimeNanos()) + .build(); + } + } + return null; + } + + @Override + public boolean shouldRetry(Throwable prevThrowable, ResponseT prevResponse) { + if (prevThrowable != null) { + Status status = Status.fromThrowable(prevThrowable); + Metadata metadata = Status.trailersFromThrowable(prevThrowable); + if (Errors.isRetryableStatus(status, metadata).isRetryable) { + return true; + } + } + return (prevThrowable instanceof ApiException) && ((ApiException) prevThrowable).isRetryable(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsAttemptCallable.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsAttemptCallable.java new file mode 100644 index 000000000000..052ee315a071 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsAttemptCallable.java @@ -0,0 +1,326 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.stub.readrows; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.ServerStreamingAttemptException; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StateCheckingResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.common.base.Preconditions; +import java.time.Duration; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import javax.annotation.concurrent.GuardedBy; + +final class ReadRowsAttemptCallable implements Callable { + private final Object lock = new Object(); + + private final ServerStreamingCallable innerCallable; + private final StreamResumptionStrategy resumptionStrategy; + private final ReadRowsRequest initialRequest; + private ApiCallContext context; + private final ResponseObserver outerObserver; + + // Start state + private boolean autoFlowControl = true; + private boolean isStarted; + + // Outer state + @GuardedBy("lock") + private Throwable cancellationCause; + + @GuardedBy("lock") + private int pendingRequests; + + private RetryingFuture outerRetryingFuture; + + // Internal retry state + private int numAttempts; + + @GuardedBy("lock") + private StreamController innerController; + + private boolean seenSuccessSinceLastError; + private SettableApiFuture innerAttemptFuture; + + ReadRowsAttemptCallable( + ServerStreamingCallable innerCallable, + StreamResumptionStrategy resumptionStrategy, + ReadRowsRequest initialRequest, + ApiCallContext context, + ResponseObserver outerObserver) { + this.innerCallable = innerCallable; + this.resumptionStrategy = resumptionStrategy; + this.initialRequest = initialRequest; + this.context = context; + this.outerObserver = outerObserver; + } + + /** Sets controlling {@link RetryingFuture}. Must be called be before {@link #start()}. */ + void setExternalFuture(RetryingFuture retryingFuture) { + Preconditions.checkState(!isStarted, "Can't change the RetryingFuture once the call has start"); + Preconditions.checkNotNull(retryingFuture, "RetryingFuture can't be null"); + + this.outerRetryingFuture = retryingFuture; + } + + /** + * Starts the initial call. The call is attempted on the caller's thread. Further call attempts + * will be scheduled by the {@link RetryingFuture}. + */ + public void start() { + Preconditions.checkState(!isStarted, "Already started"); + + // Initialize the outer observer + outerObserver.onStart( + new StreamController() { + @Override + public void disableAutoInboundFlowControl() { + Preconditions.checkState( + !isStarted, "Can't disable auto flow control once the stream is started"); + autoFlowControl = false; + } + + @Override + public void request(int count) { + onRequest(count); + } + + @Override + public void cancel() { + onCancel(); + } + }); + + if (autoFlowControl) { + synchronized (lock) { + pendingRequests = Integer.MAX_VALUE; + } + } + isStarted = true; + + // Propagate the totalTimeout as the overall stream deadline. 
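+ // The totalTimeout below bounds the entire retried stream: every resumed attempt shares
+ // the one deadline instead of restarting the clock per attempt.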
+ Duration totalTimeout = + outerRetryingFuture.getAttemptSettings().getGlobalSettings().getTotalTimeoutDuration(); + + if (totalTimeout != null && context != null) { + context = context.withTimeoutDuration(totalTimeout); + } + + // Call the inner callable + call(); + } + + /** + * Sends the actual RPC. The request being sent will first be transformed by the {@link + * StreamResumptionStrategy}. + * + *

This method expects to be called by one thread at a time. Furthermore, it expects that the + * current RPC finished before the next time it's called. + */ + @Override + public Void call() { + Preconditions.checkState(isStarted, "Must be started first"); + + ReadRowsRequest request = + (++numAttempts == 1) ? initialRequest : resumptionStrategy.getResumeRequest(initialRequest); + + // Should never happen. onAttemptError will check if ResumptionStrategy can create a resume + // request, + // which the RetryingFuture/StreamResumptionStrategy should respect. + Preconditions.checkState(request != null, "ResumptionStrategy returned a null request."); + + innerAttemptFuture = SettableApiFuture.create(); + seenSuccessSinceLastError = false; + + ApiCallContext attemptContext = context; + + if (!outerRetryingFuture.getAttemptSettings().getRpcTimeout().isZero()) { + attemptContext = + attemptContext.withStreamWaitTimeout( + outerRetryingFuture.getAttemptSettings().getRpcTimeout()); + } + + attemptContext + .getTracer() + .attemptStarted(outerRetryingFuture.getAttemptSettings().getOverallAttemptCount()); + + innerCallable.call( + request, + new StateCheckingResponseObserver() { + @Override + public void onStartImpl(StreamController controller) { + onAttemptStart(controller); + } + + @Override + public void onResponseImpl(ReadRowsResponse response) { + onAttemptResponse(response); + } + + @Override + public void onErrorImpl(Throwable t) { + onAttemptError(t); + } + + @Override + public void onCompleteImpl() { + onAttemptComplete(); + } + }, + attemptContext); + + outerRetryingFuture.setAttemptFuture(innerAttemptFuture); + + return null; + } + + /** + * Called by the inner {@link ServerStreamingCallable} when the call is about to start. This will + * transfer unfinished state from the previous attempt. + * + * @see ResponseObserver#onStart(StreamController) + */ + private void onAttemptStart(StreamController controller) { + if (!autoFlowControl) { + controller.disableAutoInboundFlowControl(); + } + + Throwable localCancellationCause; + int numToRequest = 0; + + synchronized (lock) { + innerController = controller; + + localCancellationCause = this.cancellationCause; + + if (!autoFlowControl) { + numToRequest = pendingRequests; + } + } + + if (localCancellationCause != null) { + controller.cancel(); + } else if (numToRequest > 0) { + controller.request(numToRequest); + } + } + + /** + * Called when the outer {@link ResponseObserver} wants to prematurely cancel the stream. + * + * @see StreamController#cancel() + */ + private void onCancel() { + StreamController localInnerController; + + synchronized (lock) { + if (cancellationCause != null) { + return; + } + // NOTE: BasicRetryingFuture will replace j.u.c.CancellationExceptions with it's own, + // which will not have the current stacktrace, so a special wrapper has be used here. + cancellationCause = + new ServerStreamingAttemptException( + new CancellationException("User cancelled stream"), + resumptionStrategy.canResume(), + seenSuccessSinceLastError); + localInnerController = innerController; + } + + if (localInnerController != null) { + localInnerController.cancel(); + } + } + + /** + * Called when the outer {@link ResponseObserver} is ready for more data. 
+ * + * @see StreamController#request(int) + */ + private void onRequest(int count) { + Preconditions.checkState(!autoFlowControl, "Automatic flow control is enabled"); + Preconditions.checkArgument(count > 0, "Count must be > 0"); + + final StreamController localInnerController; + + synchronized (lock) { + int maxInc = Integer.MAX_VALUE - pendingRequests; + count = Math.min(maxInc, count); + + pendingRequests += count; + localInnerController = this.innerController; + } + + // Note: there is a race condition here where the count might go to the previous attempt's + // StreamController after it failed. But it doesn't matter, because the controller will just + // ignore it and the current controller will pick it up onStart. + if (localInnerController != null) { + localInnerController.request(count); + } + } + + /** Called when the inner callable has responses to deliver. */ + private void onAttemptResponse(ReadRowsResponse message) { + if (!autoFlowControl) { + synchronized (lock) { + pendingRequests--; + } + } + // Update local state to allow for future resume. + seenSuccessSinceLastError = true; + message = resumptionStrategy.processResponse(message); + // Notify the outer observer. + outerObserver.onResponse(message); + } + + /** + * Called when the current RPC fails. The error will be bubbled up to the outer {@link + * RetryingFuture} via the {@link #innerAttemptFuture}. + */ + private void onAttemptError(Throwable throwable) { + Throwable localCancellationCause; + synchronized (lock) { + localCancellationCause = cancellationCause; + } + + if (localCancellationCause != null) { + // Take special care to preserve the cancellation's stack trace. + innerAttemptFuture.setException(localCancellationCause); + } else { + // Wrap the original exception and provide more context for StreamingRetryAlgorithm. + innerAttemptFuture.setException( + new ServerStreamingAttemptException( + throwable, resumptionStrategy.canResume(), seenSuccessSinceLastError)); + } + } + + /** + * Called when the current RPC successfully completes. Notifies the outer {@link RetryingFuture} + * via {@link #innerAttemptFuture}. + */ + private void onAttemptComplete() { + innerAttemptFuture.set(null); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsResumptionStrategy.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsResumptionStrategy.java new file mode 100644 index 000000000000..721e3e36737c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsResumptionStrategy.java @@ -0,0 +1,72 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1.stub.readrows; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import javax.annotation.Nonnull; + +/** + * An implementation of a {@link StreamResumptionStrategy} for the ReadRows API. This class tracks + * the offset of the last row received and, upon retry, attempts to resume the stream at the next + * offset. + * + *
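Concretely, the offset arithmetic works out as follows (a sketch using only the types in this file; the stream name is hypothetical):

    ReadRowsResumptionStrategy strategy = new ReadRowsResumptionStrategy();
    ReadRowsRequest original =
        ReadRowsRequest.newBuilder()
            .setReadStream("projects/p/locations/l/sessions/s/streams/0") // hypothetical
            .setOffset(1000)
            .build();
    // Suppose responses totalling 250 rows were delivered before the stream broke:
    strategy.processResponse(ReadRowsResponse.newBuilder().setRowCount(250).build());
    ReadRowsRequest resumed = strategy.getResumeRequest(original);
    // resumed.getOffset() == 1250: the retry picks up right after the last delivered row.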

This class is considered an internal implementation detail and not meant to be used by + * applications. + */ +@InternalApi +public class ReadRowsResumptionStrategy + implements StreamResumptionStrategy { + + // Number of rows processed. + private long rowsProcessed = 0; + + @Override + @Nonnull + public StreamResumptionStrategy createNew() { + return new ReadRowsResumptionStrategy(); + } + + @Override + @Nonnull + public ReadRowsResponse processResponse(ReadRowsResponse response) { + rowsProcessed += response.getRowCount(); + return response; + } + + /** + * {@inheritDoc} + * + *

Given the initial/original request, this implementation generates a request that will yield + * a new stream whose first response would come right after the last response received by + * processResponse. It takes into account the offset from the original request. + */ + @Override + public ReadRowsRequest getResumeRequest(ReadRowsRequest originalRequest) { + ReadRowsRequest.Builder resumeRequestBuilder = originalRequest.toBuilder(); + + resumeRequestBuilder.setOffset(originalRequest.getOffset() + rowsProcessed); + + return resumeRequestBuilder.build(); + } + + @Override + public boolean canResume() { + return true; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryingCallable.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryingCallable.java new file mode 100644 index 000000000000..cbcacbcff7b1 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryingCallable.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.stub.readrows; + +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; + +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.ScheduledRetryingExecutor; +import com.google.api.gax.retrying.ServerStreamingAttemptException; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; + +public final class ReadRowsRetryingCallable + extends ServerStreamingCallable { + + private final ApiCallContext context; + private final ServerStreamingCallable innerCallable; + private final ScheduledRetryingExecutor executor; + private final StreamResumptionStrategy + resumptionStrategyPrototype; + + public ReadRowsRetryingCallable( + ApiCallContext context, + ServerStreamingCallable innerCallable, + ScheduledRetryingExecutor executor, + StreamResumptionStrategy resumptionStrategyPrototype) { + this.context = context; + this.innerCallable = innerCallable; + this.executor = executor; + this.resumptionStrategyPrototype = resumptionStrategyPrototype; + } + + @Override + public void call( + ReadRowsRequest request, + final ResponseObserver responseObserver, + ApiCallContext context) { + ApiCallContext actualContext = this.context.merge(context); + ReadRowsAttemptCallable attemptCallable = + new ReadRowsAttemptCallable( + innerCallable, + resumptionStrategyPrototype.createNew(), + request, + actualContext, 
+ responseObserver); + + RetryingFuture retryingFuture = executor.createFuture(attemptCallable, actualContext); + attemptCallable.setExternalFuture(retryingFuture); + attemptCallable.start(); + + // Bridge the future result back to the external responseObserver + ApiFutures.addCallback( + retryingFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + // Make sure to unwrap the underlying ApiException + if (throwable instanceof ServerStreamingAttemptException) { + throwable = throwable.getCause(); + } + responseObserver.onError(throwable); + } + + @Override + public void onSuccess(Void ignored) { + responseObserver.onComplete(); + } + }, + directExecutor()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/package-info.java new file mode 100644 index 000000000000..1e579eb7ae64 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/readrows/package-info.java @@ -0,0 +1,16 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1.stub.readrows; diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java new file mode 100644 index 000000000000..b543150c4389 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java @@ -0,0 +1,634 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1alpha; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1alpha.stub.MetastorePartitionServiceStub; +import com.google.cloud.bigquery.storage.v1alpha.stub.MetastorePartitionServiceStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery Metastore Partition Service API. This service is used for managing + * metastore partitions in BigQuery metastore. The service supports only batch operations for write. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create()) {
+ *   BatchCreateMetastorePartitionsRequest request =
+ *       BatchCreateMetastorePartitionsRequest.newBuilder()
+ *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+ *           .addAllRequests(new ArrayList())
+ *           .addAllRequests(new ArrayList<CreateMetastorePartitionRequest>())
+ *           .setTraceId("traceId-1067401920")
+ *           .build();
+ *   BatchCreateMetastorePartitionsResponse response =
+ *       metastorePartitionServiceClient.batchCreateMetastorePartitions(request);
+ * }
+ * }</pre>
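On success, the response is expected to carry the partitions that were created; a hedged follow-up to the sample above (the accessors are assumed from the API shape, not taken from this diff):

    for (MetastorePartition partition : response.getPartitionsList()) { // assumed accessor
      System.out.println(partition.getValuesList()); // assumed: the partition key values
    }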
+ * + *

Note: close() needs to be called on the MetastorePartitionServiceClient object to clean up
+ * resources such as threads. In the example above, try-with-resources is used, which automatically
+ * calls close().
+ *
+ * <table>
+ *   <caption>Methods</caption>
+ *   <tr>
+ *     <th>Method</th>
+ *     <th>Description</th>
+ *     <th>Method Variants</th>
+ *   </tr>
+ *   <tr>
+ *     <td>BatchCreateMetastorePartitions</td>
+ *     <td>Adds metastore partitions to a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be
+ *       constructed before the call.
+ *       <ul>
+ *         <li>batchCreateMetastorePartitions(BatchCreateMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *       object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li>batchCreateMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td>BatchDeleteMetastorePartitions</td>
+ *     <td>Deletes metastore partitions from a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be
+ *       constructed before the call.
+ *       <ul>
+ *         <li>batchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *       object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li>batchDeleteMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td>BatchUpdateMetastorePartitions</td>
+ *     <td>Updates metastore partitions in a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be
+ *       constructed before the call.
+ *       <ul>
+ *         <li>batchUpdateMetastorePartitions(BatchUpdateMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *       object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li>batchUpdateMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td>ListMetastorePartitions</td>
+ *     <td>Gets metastore partitions from a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be
+ *       constructed before the call.
+ *       <ul>
+ *         <li>listMetastorePartitions(ListMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into
+ *       function parameters to enable multiple ways to call the same method.
+ *       <ul>
+ *         <li>listMetastorePartitions(TableName parent)
+ *         <li>listMetastorePartitions(String parent)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *       object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li>listMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td>StreamMetastorePartitions</td>
+ *     <td>This is a bi-di streaming rpc method that allows the client to send a stream of
+ *     partitions and commit all of them atomically at the end. If the commit is successful, the
+ *     server will return a response and close the stream. If the commit fails (due to duplicate
+ *     partitions or other reason), the server will close the stream with an error. This method is
+ *     only available via the gRPC API (not REST).</td>
+ *     <td>
+ *       <p>Callable method variants take no parameters and return an immutable API callable
+ *       object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li>streamMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ * </table>
+ *

See the individual methods for example code. + * + *
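StreamMetastorePartitions is the exception: it is exposed only through its callable, so it is driven with the gax streaming primitives. A minimal sketch, assuming the standard BidiStreamingCallable#splitCall surface (com.google.api.gax.rpc.ClientStream, ResponseObserver, StreamController) and placeholder resource names:

    ClientStream<StreamMetastorePartitionsRequest> stream =
        metastorePartitionServiceClient
            .streamMetastorePartitionsCallable()
            .splitCall(
                new ResponseObserver<StreamMetastorePartitionsResponse>() {
                  @Override
                  public void onStart(StreamController controller) {}

                  @Override
                  public void onResponse(StreamMetastorePartitionsResponse response) {
                    // Sent once after the server commits all partitions atomically.
                  }

                  @Override
                  public void onError(Throwable t) {
                    // The commit failed and the whole batch was rejected.
                  }

                  @Override
                  public void onComplete() {}
                });
    stream.send(
        StreamMetastorePartitionsRequest.newBuilder()
            .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
            .build());
    stream.closeSend(); // signals that all partitions have been sent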

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *
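For example, the TableName helper used in the samples above both formats and parses the projects/{project}/datasets/{dataset}/tables/{table} form (identifiers below are hypothetical):

    TableName name = TableName.of("my-project", "my_dataset", "my_table");
    String formatted = name.toString();
    // "projects/my-project/datasets/my_dataset/tables/my_table"
    TableName parsed = TableName.parse(formatted);
    String dataset = parsed.getDataset();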

This class can be customized by passing in a custom instance of + * MetastorePartitionServiceSettings to create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceSettings metastorePartitionServiceSettings =
+ *     MetastorePartitionServiceSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create(metastorePartitionServiceSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceSettings metastorePartitionServiceSettings =
+ *     MetastorePartitionServiceSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create(metastorePartitionServiceSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class MetastorePartitionServiceClient implements BackgroundResource { + private final MetastorePartitionServiceSettings settings; + private final MetastorePartitionServiceStub stub; + + /** Constructs an instance of MetastorePartitionServiceClient with default settings. */ + public static final MetastorePartitionServiceClient create() throws IOException { + return create(MetastorePartitionServiceSettings.newBuilder().build()); + } + + /** + * Constructs an instance of MetastorePartitionServiceClient, using the given settings. The + * channels are created based on the settings passed in, or defaults for any settings that are not + * set. + */ + public static final MetastorePartitionServiceClient create( + MetastorePartitionServiceSettings settings) throws IOException { + return new MetastorePartitionServiceClient(settings); + } + + /** + * Constructs an instance of MetastorePartitionServiceClient, using the given stub for making + * calls. This is for advanced usage - prefer using create(MetastorePartitionServiceSettings). + */ + public static final MetastorePartitionServiceClient create(MetastorePartitionServiceStub stub) { + return new MetastorePartitionServiceClient(stub); + } + + /** + * Constructs an instance of MetastorePartitionServiceClient, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected MetastorePartitionServiceClient(MetastorePartitionServiceSettings settings) + throws IOException { + this.settings = settings; + this.stub = ((MetastorePartitionServiceStubSettings) settings.getStubSettings()).createStub(); + } + + protected MetastorePartitionServiceClient(MetastorePartitionServiceStub stub) { + this.settings = null; + this.stub = stub; + } + + public final MetastorePartitionServiceSettings getSettings() { + return settings; + } + + public MetastorePartitionServiceStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds metastore partitions to a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchCreateMetastorePartitionsRequest request =
+   *       BatchCreateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<CreateMetastorePartitionRequest>())
+   *           .setSkipExistingPartitions(true)
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   BatchCreateMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.batchCreateMetastorePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreateMetastorePartitionsResponse batchCreateMetastorePartitions( + BatchCreateMetastorePartitionsRequest request) { + return batchCreateMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds metastore partitions to a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchCreateMetastorePartitionsRequest request =
+   *       BatchCreateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<CreateMetastorePartitionRequest>())
+   *           .setSkipExistingPartitions(true)
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<BatchCreateMetastorePartitionsResponse> future =
+   *       metastorePartitionServiceClient
+   *           .batchCreateMetastorePartitionsCallable()
+   *           .futureCall(request);
+   *   // Do something.
+   *   BatchCreateMetastorePartitionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsCallable() { + return stub.batchCreateMetastorePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes metastore partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchDeleteMetastorePartitionsRequest request =
+   *       BatchDeleteMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllPartitionValues(new ArrayList<MetastorePartitionValues>())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   metastorePartitionServiceClient.batchDeleteMetastorePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void batchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest request) { + batchDeleteMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes metastore partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchDeleteMetastorePartitionsRequest request =
+   *       BatchDeleteMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllPartitionValues(new ArrayList<MetastorePartitionValues>())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<Empty> future =
+   *       metastorePartitionServiceClient
+   *           .batchDeleteMetastorePartitionsCallable()
+   *           .futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<BatchDeleteMetastorePartitionsRequest, Empty> + batchDeleteMetastorePartitionsCallable() { + return stub.batchDeleteMetastorePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates metastore partitions in a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchUpdateMetastorePartitionsRequest request =
+   *       BatchUpdateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<UpdateMetastorePartitionRequest>())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   BatchUpdateMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.batchUpdateMetastorePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchUpdateMetastorePartitionsResponse batchUpdateMetastorePartitions( + BatchUpdateMetastorePartitionsRequest request) { + return batchUpdateMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates metastore partitions in a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchUpdateMetastorePartitionsRequest request =
+   *       BatchUpdateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<UpdateMetastorePartitionRequest>())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<BatchUpdateMetastorePartitionsResponse> future =
+   *       metastorePartitionServiceClient
+   *           .batchUpdateMetastorePartitionsCallable()
+   *           .futureCall(request);
+   *   // Do something.
+   *   BatchUpdateMetastorePartitionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsCallable() { + return stub.batchUpdateMetastorePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+   *   ListMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.listMetastorePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which these metastore partitions belong, in + * the format of projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListMetastorePartitionsResponse listMetastorePartitions(TableName parent) { + ListMetastorePartitionsRequest request = + ListMetastorePartitionsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listMetastorePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
+   *   ListMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.listMetastorePartitions(parent);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which these metastore partitions belong, in + * the format of projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListMetastorePartitionsResponse listMetastorePartitions(String parent) { + ListMetastorePartitionsRequest request = + ListMetastorePartitionsRequest.newBuilder().setParent(parent).build(); + return listMetastorePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   ListMetastorePartitionsRequest request =
+   *       ListMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ListMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.listMetastorePartitions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListMetastorePartitionsResponse listMetastorePartitions( + ListMetastorePartitionsRequest request) { + return listMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   ListMetastorePartitionsRequest request =
+   *       ListMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<ListMetastorePartitionsResponse> future =
+   *       metastorePartitionServiceClient.listMetastorePartitionsCallable().futureCall(request);
+   *   // Do something.
+   *   ListMetastorePartitionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsCallable() { + return stub.listMetastorePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is a bi-di streaming rpc method that allows the client to send a stream of partitions and + * commit all of them atomically at the end. If the commit is successful, the server will return a + * response and close the stream. If the commit fails (due to duplicate partitions or other + * reason), the server will close the stream with an error. This method is only available via the + * gRPC API (not REST). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BidiStream<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> bidiStream =
+   *       metastorePartitionServiceClient.streamMetastorePartitionsCallable().call();
+   *   StreamMetastorePartitionsRequest request =
+   *       StreamMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllMetastorePartitions(new ArrayList<MetastorePartition>())
+   *           .setSkipExistingPartitions(true)
+   *           .build();
+   *   bidiStream.send(request);
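+   *   // Hedged addition, not generated output: once the last request has been sent, closing
+   *   // the send side lets the server perform the atomic commit and complete the stream.
+   *   bidiStream.closeSend();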
+   *   for (StreamMetastorePartitionsResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ */ + public final BidiStreamingCallable< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsCallable() { + return stub.streamMetastorePartitionsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java new file mode 100644 index 000000000000..401b2242b8f6 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java @@ -0,0 +1,266 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1alpha; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1alpha.stub.MetastorePartitionServiceStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link MetastorePartitionServiceClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of batchCreateMetastorePartitions: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceSettings.Builder metastorePartitionServiceSettingsBuilder =
+ *     MetastorePartitionServiceSettings.newBuilder();
+ * metastorePartitionServiceSettingsBuilder
+ *     .batchCreateMetastorePartitionsSettings()
+ *     .setRetrySettings(
+ *         metastorePartitionServiceSettingsBuilder
+ *             .batchCreateMetastorePartitionsSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * MetastorePartitionServiceSettings metastorePartitionServiceSettings =
+ *     metastorePartitionServiceSettingsBuilder.build();
+ * }
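+ *
+ * <p>As a hedged sketch (not generated output), the same builder also exposes
+ * applyToAllUnaryMethods, shown below applying one retry policy to every unary method at once
+ * (streaming methods are unaffected):
+ *
+ * <pre>{@code
+ * metastorePartitionServiceSettingsBuilder.applyToAllUnaryMethods(
+ *     unarySettings -> {
+ *       unarySettings.setRetrySettings(
+ *           unarySettings.getRetrySettings().toBuilder().setMaxAttempts(5).build());
+ *       return null;
+ *     });
+ * }</pre>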
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class MetastorePartitionServiceSettings + extends ClientSettings { + + /** Returns the object with the settings used for calls to batchCreateMetastorePartitions. */ + public UnaryCallSettings< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings() { + return ((MetastorePartitionServiceStubSettings) getStubSettings()) + .batchCreateMetastorePartitionsSettings(); + } + + /** Returns the object with the settings used for calls to batchDeleteMetastorePartitions. */ + public UnaryCallSettings + batchDeleteMetastorePartitionsSettings() { + return ((MetastorePartitionServiceStubSettings) getStubSettings()) + .batchDeleteMetastorePartitionsSettings(); + } + + /** Returns the object with the settings used for calls to batchUpdateMetastorePartitions. */ + public UnaryCallSettings< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings() { + return ((MetastorePartitionServiceStubSettings) getStubSettings()) + .batchUpdateMetastorePartitionsSettings(); + } + + /** Returns the object with the settings used for calls to listMetastorePartitions. */ + public UnaryCallSettings + listMetastorePartitionsSettings() { + return ((MetastorePartitionServiceStubSettings) getStubSettings()) + .listMetastorePartitionsSettings(); + } + + /** Returns the object with the settings used for calls to streamMetastorePartitions. */ + public StreamingCallSettings + streamMetastorePartitionsSettings() { + return ((MetastorePartitionServiceStubSettings) getStubSettings()) + .streamMetastorePartitionsSettings(); + } + + public static final MetastorePartitionServiceSettings create( + MetastorePartitionServiceStubSettings stub) throws IOException { + return new MetastorePartitionServiceSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return MetastorePartitionServiceStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return MetastorePartitionServiceStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return MetastorePartitionServiceStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return MetastorePartitionServiceStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return MetastorePartitionServiceStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return MetastorePartitionServiceStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return MetastorePartitionServiceStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected MetastorePartitionServiceSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for MetastorePartitionServiceSettings. */ + public static class Builder + extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(MetastorePartitionServiceStubSettings.newBuilder(clientContext)); + } + + protected Builder(MetastorePartitionServiceSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(MetastorePartitionServiceStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(MetastorePartitionServiceStubSettings.newBuilder()); + } + + public MetastorePartitionServiceStubSettings.Builder getStubSettingsBuilder() { + return ((MetastorePartitionServiceStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to batchCreateMetastorePartitions. */ + public UnaryCallSettings.Builder< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings() { + return getStubSettingsBuilder().batchCreateMetastorePartitionsSettings(); + } + + /** Returns the builder for the settings used for calls to batchDeleteMetastorePartitions. */ + public UnaryCallSettings.Builder + batchDeleteMetastorePartitionsSettings() { + return getStubSettingsBuilder().batchDeleteMetastorePartitionsSettings(); + } + + /** Returns the builder for the settings used for calls to batchUpdateMetastorePartitions. */ + public UnaryCallSettings.Builder< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings() { + return getStubSettingsBuilder().batchUpdateMetastorePartitionsSettings(); + } + + /** Returns the builder for the settings used for calls to listMetastorePartitions. */ + public UnaryCallSettings.Builder< + ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsSettings() { + return getStubSettingsBuilder().listMetastorePartitionsSettings(); + } + + /** Returns the builder for the settings used for calls to streamMetastorePartitions. */ + public StreamingCallSettings.Builder< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsSettings() { + return getStubSettingsBuilder().streamMetastorePartitionsSettings(); + } + + @Override + public MetastorePartitionServiceSettings build() throws IOException { + return new MetastorePartitionServiceSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/gapic_metadata.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/gapic_metadata.json new file mode 100644 index 000000000000..05d6ab274992 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/gapic_metadata.json @@ -0,0 +1,33 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.cloud.bigquery.storage.v1alpha", + "libraryPackage": "com.google.cloud.bigquery.storage.v1alpha", + "services": { + "MetastorePartitionService": { + "clients": { + "grpc": { + "libraryClient": "MetastorePartitionServiceClient", + "rpcs": { + "BatchCreateMetastorePartitions": { + "methods": ["batchCreateMetastorePartitions", "batchCreateMetastorePartitionsCallable"] + }, + "BatchDeleteMetastorePartitions": { + "methods": ["batchDeleteMetastorePartitions", "batchDeleteMetastorePartitionsCallable"] + }, + "BatchUpdateMetastorePartitions": { + "methods": ["batchUpdateMetastorePartitions", "batchUpdateMetastorePartitionsCallable"] + }, + "ListMetastorePartitions": { + "methods": ["listMetastorePartitions", "listMetastorePartitions", "listMetastorePartitions", "listMetastorePartitionsCallable"] + }, + "StreamMetastorePartitions": { + "methods": ["streamMetastorePartitionsCallable"] + } + } + } + 
} + } + } +} \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java new file mode 100644 index 000000000000..50643a66a61c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigQuery Storage API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= MetastorePartitionServiceClient ======================= + * + *

Service Description: BigQuery Metastore Partition Service API. This service is used for + * managing metastore partitions in BigQuery metastore. The service supports only batch operations + * for write. + * + *

Sample for MetastorePartitionServiceClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create()) {
+ *   BatchCreateMetastorePartitionsRequest request =
+ *       BatchCreateMetastorePartitionsRequest.newBuilder()
+ *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList<CreateMetastorePartitionRequest>())
+ *           .setSkipExistingPartitions(true)
+ *           .setTraceId("traceId-1067401920")
+ *           .build();
+ *   BatchCreateMetastorePartitionsResponse response =
+ *       metastorePartitionServiceClient.batchCreateMetastorePartitions(request);
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.bigquery.storage.v1alpha; + +import javax.annotation.Generated; diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java new file mode 100644 index 000000000000..b0a49fde5332 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1alpha.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the MetastorePartitionService service API. + * + *

This class is for advanced usage. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcMetastorePartitionServiceCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java new file mode 100644 index 000000000000..9342331597c0 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java @@ -0,0 +1,346 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1alpha.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the MetastorePartitionService service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcMetastorePartitionServiceStub extends MetastorePartitionServiceStub { + private static final MethodDescriptor< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha.MetastorePartitionService/BatchCreateMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCreateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller( + BatchCreateMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + batchDeleteMetastorePartitionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha.MetastorePartitionService/BatchDeleteMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchDeleteMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha.MetastorePartitionService/BatchUpdateMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchUpdateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller( + BatchUpdateMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha.MetastorePartitionService/ListMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(ListMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha.MetastorePartitionService/StreamMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(StreamMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(StreamMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsCallable; + private final UnaryCallable + batchDeleteMetastorePartitionsCallable; + private final UnaryCallable< + 
BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsCallable; + private final UnaryCallable + listMetastorePartitionsCallable; + private final BidiStreamingCallable< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcMetastorePartitionServiceStub create( + MetastorePartitionServiceStubSettings settings) throws IOException { + return new GrpcMetastorePartitionServiceStub(settings, ClientContext.create(settings)); + } + + public static final GrpcMetastorePartitionServiceStub create(ClientContext clientContext) + throws IOException { + return new GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcMetastorePartitionServiceStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcMetastorePartitionServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcMetastorePartitionServiceCallableFactory()); + } + + /** + * Constructs an instance of GrpcMetastorePartitionServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + batchCreateMetastorePartitionsTransportSettings = + GrpcCallSettings + . + newBuilder() + .setMethodDescriptor(batchCreateMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchDeleteMetastorePartitionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(batchDeleteMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchUpdateMetastorePartitionsTransportSettings = + GrpcCallSettings + . 
+ newBuilder() + .setMethodDescriptor(batchUpdateMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listMetastorePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(listMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + streamMetastorePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(streamMetastorePartitionsMethodDescriptor) + .build(); + + this.batchCreateMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + batchCreateMetastorePartitionsTransportSettings, + settings.batchCreateMetastorePartitionsSettings(), + clientContext); + this.batchDeleteMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + batchDeleteMetastorePartitionsTransportSettings, + settings.batchDeleteMetastorePartitionsSettings(), + clientContext); + this.batchUpdateMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + batchUpdateMetastorePartitionsTransportSettings, + settings.batchUpdateMetastorePartitionsSettings(), + clientContext); + this.listMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + listMetastorePartitionsTransportSettings, + settings.listMetastorePartitionsSettings(), + clientContext); + this.streamMetastorePartitionsCallable = + callableFactory.createBidiStreamingCallable( + streamMetastorePartitionsTransportSettings, + settings.streamMetastorePartitionsSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsCallable() { + return batchCreateMetastorePartitionsCallable; + } + + @Override + public UnaryCallable + batchDeleteMetastorePartitionsCallable() { + return batchDeleteMetastorePartitionsCallable; + } + + @Override + public UnaryCallable< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsCallable() { + return batchUpdateMetastorePartitionsCallable; + } + + @Override + public UnaryCallable + listMetastorePartitionsCallable() { + return listMetastorePartitionsCallable; + } + + @Override + public BidiStreamingCallable + streamMetastorePartitionsCallable() { + return streamMetastorePartitionsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws 
InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java new file mode 100644 index 000000000000..74cb15242bba --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java @@ -0,0 +1,77 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1alpha.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the MetastorePartitionService service API. + * + *
<p>This class is for advanced usage and reflects the underlying API directly.
+ */
+@BetaApi
+@Generated("by gapic-generator-java")
+public abstract class MetastorePartitionServiceStub implements BackgroundResource {
+
+  public UnaryCallable<
+          BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse>
+      batchCreateMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: batchCreateMetastorePartitionsCallable()");
+  }
+
+  public UnaryCallable<BatchDeleteMetastorePartitionsRequest, Empty>
+      batchDeleteMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: batchDeleteMetastorePartitionsCallable()");
+  }
+
+  public UnaryCallable<
+          BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse>
+      batchUpdateMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: batchUpdateMetastorePartitionsCallable()");
+  }
+
+  public UnaryCallable<ListMetastorePartitionsRequest, ListMetastorePartitionsResponse>
+      listMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException("Not implemented: listMetastorePartitionsCallable()");
+  }
+
+  public BidiStreamingCallable<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+      streamMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException("Not implemented: streamMetastorePartitionsCallable()");
+  }
+
+  @Override
+  public abstract void close();
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java
new file mode 100644
index 000000000000..6e07970bbaab
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1alpha.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link MetastorePartitionServiceStub}. + * + *
<p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of batchCreateMetastorePartitions:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceStubSettings.Builder metastorePartitionServiceSettingsBuilder =
+ *     MetastorePartitionServiceStubSettings.newBuilder();
+ * metastorePartitionServiceSettingsBuilder
+ *     .batchCreateMetastorePartitionsSettings()
+ *     .setRetrySettings(
+ *         metastorePartitionServiceSettingsBuilder
+ *             .batchCreateMetastorePartitionsSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * MetastorePartitionServiceStubSettings metastorePartitionServiceSettings =
+ *     metastorePartitionServiceSettingsBuilder.build();
+ * }</pre>
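Reviewer note (not part of the generated diff): the sample above stops at building the stub-level settings object. A minimal sketch of wiring those settings into a usable client, assuming the v1alpha client and settings classes added elsewhere in this PR mirror the v1beta MetastorePartitionServiceSettings.create(MetastorePartitionServiceStubSettings) factory that appears later in this diff:

    import com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceClient;
    import com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceSettings;
    import com.google.cloud.bigquery.storage.v1alpha.stub.MetastorePartitionServiceStubSettings;

    public class StubSettingsWiring {
      public static void main(String[] args) throws Exception {
        // Stub-level settings (for example, carrying the retry overrides shown above).
        MetastorePartitionServiceStubSettings stubSettings =
            MetastorePartitionServiceStubSettings.newBuilder().build();
        // Bridge stub-level settings to client-level settings, then open the client.
        MetastorePartitionServiceSettings settings =
            MetastorePartitionServiceSettings.create(stubSettings);
        try (MetastorePartitionServiceClient client =
            MetastorePartitionServiceClient.create(settings)) {
          // Issue RPCs here; try-with-resources calls close() automatically.
        }
      }
    }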
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class MetastorePartitionServiceStubSettings + extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings; + private final UnaryCallSettings + batchDeleteMetastorePartitionsSettings; + private final UnaryCallSettings< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings; + private final UnaryCallSettings + listMetastorePartitionsSettings; + private final StreamingCallSettings< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsSettings; + + /** Returns the object with the settings used for calls to batchCreateMetastorePartitions. */ + public UnaryCallSettings< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings() { + return batchCreateMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to batchDeleteMetastorePartitions. */ + public UnaryCallSettings + batchDeleteMetastorePartitionsSettings() { + return batchDeleteMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to batchUpdateMetastorePartitions. */ + public UnaryCallSettings< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings() { + return batchUpdateMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to listMetastorePartitions. */ + public UnaryCallSettings + listMetastorePartitionsSettings() { + return listMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to streamMetastorePartitions. */ + public StreamingCallSettings + streamMetastorePartitionsSettings() { + return streamMetastorePartitionsSettings; + } + + public MetastorePartitionServiceStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcMetastorePartitionServiceStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. 
*/ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(MetastorePartitionServiceStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected MetastorePartitionServiceStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + batchCreateMetastorePartitionsSettings = + settingsBuilder.batchCreateMetastorePartitionsSettings().build(); + batchDeleteMetastorePartitionsSettings = + settingsBuilder.batchDeleteMetastorePartitionsSettings().build(); + batchUpdateMetastorePartitionsSettings = + settingsBuilder.batchUpdateMetastorePartitionsSettings().build(); + listMetastorePartitionsSettings = settingsBuilder.listMetastorePartitionsSettings().build(); + streamMetastorePartitionsSettings = settingsBuilder.streamMetastorePartitionsSettings().build(); + } + + /** Builder for MetastorePartitionServiceStubSettings. 
*/ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings; + private final UnaryCallSettings.Builder + batchDeleteMetastorePartitionsSettings; + private final UnaryCallSettings.Builder< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings; + private final UnaryCallSettings.Builder< + ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsSettings; + private final StreamingCallSettings.Builder< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "no_retry_1_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setTotalTimeoutDuration(Duration.ofMillis(240000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setTotalTimeoutDuration(Duration.ofMillis(240000L)) + .build(); + definitions.put("no_retry_1_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + batchCreateMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchDeleteMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchUpdateMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + streamMetastorePartitionsSettings = StreamingCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + batchCreateMetastorePartitionsSettings, + batchDeleteMetastorePartitionsSettings, + batchUpdateMetastorePartitionsSettings, + listMetastorePartitionsSettings); + initDefaults(this); + } + + protected Builder(MetastorePartitionServiceStubSettings settings) { + super(settings); + + batchCreateMetastorePartitionsSettings = + settings.batchCreateMetastorePartitionsSettings.toBuilder(); + batchDeleteMetastorePartitionsSettings = + settings.batchDeleteMetastorePartitionsSettings.toBuilder(); + batchUpdateMetastorePartitionsSettings = + settings.batchUpdateMetastorePartitionsSettings.toBuilder(); + listMetastorePartitionsSettings = settings.listMetastorePartitionsSettings.toBuilder(); + 
streamMetastorePartitionsSettings = settings.streamMetastorePartitionsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + batchCreateMetastorePartitionsSettings, + batchDeleteMetastorePartitionsSettings, + batchUpdateMetastorePartitionsSettings, + listMetastorePartitionsSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .batchCreateMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchDeleteMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchUpdateMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to batchCreateMetastorePartitions. */ + public UnaryCallSettings.Builder< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings() { + return batchCreateMetastorePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to batchDeleteMetastorePartitions. */ + public UnaryCallSettings.Builder + batchDeleteMetastorePartitionsSettings() { + return batchDeleteMetastorePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to batchUpdateMetastorePartitions. */ + public UnaryCallSettings.Builder< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings() { + return batchUpdateMetastorePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to listMetastorePartitions. */ + public UnaryCallSettings.Builder< + ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsSettings() { + return listMetastorePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to streamMetastorePartitions. */ + public StreamingCallSettings.Builder< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsSettings() { + return streamMetastorePartitionsSettings; + } + + @Override + public MetastorePartitionServiceStubSettings build() throws IOException { + return new MetastorePartitionServiceStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java new file mode 100644 index 000000000000..773e361c95d7 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java @@ -0,0 +1,634 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta.stub.MetastorePartitionServiceStub; +import com.google.cloud.bigquery.storage.v1beta.stub.MetastorePartitionServiceStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery Metastore Partition Service API. This service is used for managing + * metastore partitions in BigQuery metastore. The service supports only batch operations for write. + * + *
<p>This class provides the ability to make remote calls to the backing service through method
+ * calls that map to API methods. Sample code to get started:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create()) {
+ *   BatchCreateMetastorePartitionsRequest request =
+ *       BatchCreateMetastorePartitionsRequest.newBuilder()
+ *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+ *           .addAllRequests(new ArrayList())
+ *           .setSkipExistingPartitions(true)
+ *           .setTraceId("traceId-1067401920")
+ *           .build();
+ *   BatchCreateMetastorePartitionsResponse response =
+ *       metastorePartitionServiceClient.batchCreateMetastorePartitions(request);
+ * }
+ * }</pre>
+ * + *
<p>Note: close() needs to be called on the MetastorePartitionServiceClient object to clean up
+ * resources such as threads. In the example above, try-with-resources is used, which automatically
+ * calls close().
+ *
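Reviewer note (not part of the generated diff): for call sites that cannot use try-with-resources, a minimal equivalent of the same lifecycle rule; it relies only on the close() method defined on the client below:

    MetastorePartitionServiceClient client = MetastorePartitionServiceClient.create();
    try {
      // use the client
    } finally {
      client.close(); // releases channels and background threads
    }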
+ * <table>
+ *   <caption>Methods</caption>
+ *   <tr>
+ *     <th>Method</th>
+ *     <th>Description</th>
+ *     <th>Method Variants</th>
+ *   </tr>
+ *   <tr>
+ *     <td><p>BatchCreateMetastorePartitions</td>
+ *     <td><p>Adds metastore partitions to a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *       <ul>
+ *         <li><p>batchCreateMetastorePartitions(BatchCreateMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li><p>batchCreateMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p>BatchDeleteMetastorePartitions</td>
+ *     <td><p>Deletes metastore partitions from a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *       <ul>
+ *         <li><p>batchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li><p>batchDeleteMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p>BatchUpdateMetastorePartitions</td>
+ *     <td><p>Updates metastore partitions in a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *       <ul>
+ *         <li><p>batchUpdateMetastorePartitions(BatchUpdateMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li><p>batchUpdateMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p>ListMetastorePartitions</td>
+ *     <td><p>Gets metastore partitions from a table.</td>
+ *     <td>
+ *       <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *       <ul>
+ *         <li><p>listMetastorePartitions(ListMetastorePartitionsRequest request)
+ *       </ul>
+ *       <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *       <ul>
+ *         <li><p>listMetastorePartitions(TableName parent)
+ *         <li><p>listMetastorePartitions(String parent)
+ *       </ul>
+ *       <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li><p>listMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p>StreamMetastorePartitions</td>
+ *     <td><p>This is a bi-di streaming rpc method that allows the client to send a stream of partitions and commit all of them atomically at the end. If the commit is successful, the server will return a response and close the stream. If the commit fails (due to duplicate partitions or other reason), the server will close the stream with an error. This method is only available via the gRPC API (not REST).</td>
+ *     <td>
+ *       <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *       <ul>
+ *         <li><p>streamMetastorePartitionsCallable()
+ *       </ul>
+ *     </td>
+ *   </tr>
+ * </table>
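Reviewer note (not part of the generated diff): a minimal sketch contrasting the flattened and request-object variants from the table above; the project, dataset, and table identifiers and the filter string are illustrative placeholders:

    import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest;
    import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse;
    import com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceClient;
    import com.google.cloud.bigquery.storage.v1beta.TableName;

    public class VariantComparison {
      public static void main(String[] args) throws Exception {
        try (MetastorePartitionServiceClient client = MetastorePartitionServiceClient.create()) {
          TableName parent = TableName.of("my-project", "my_dataset", "my_table");

          // Flattened variant: only the required parent parameter.
          ListMetastorePartitionsResponse flattened = client.listMetastorePartitions(parent);

          // Request-object variant: the same RPC, but optional fields can be set too.
          ListMetastorePartitionsResponse viaRequest =
              client.listMetastorePartitions(
                  ListMetastorePartitionsRequest.newBuilder()
                      .setParent(parent.toString())
                      .setFilter("partition-filter") // filter syntax is service-defined
                      .build());
        }
      }
    }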
+ *
+ * <p>See the individual methods for example code.
+ *
+ * <p>Many parameters require resource names to be formatted in a particular way. To assist with
+ * these names, this class includes a format method for each type of name, and additionally a parse
+ * method to extract the individual identifiers contained within names that are returned.
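Reviewer note (not part of the generated diff): a short sketch of the format/parse pair described above, using the generated TableName resource-name class that the samples in this file rely on; the component getters are assumed to follow the usual generated naming:

    import com.google.cloud.bigquery.storage.v1beta.TableName;

    public class ResourceNames {
      public static void main(String[] args) {
        // Format: assemble a structured name from its components.
        TableName name = TableName.of("my-project", "my_dataset", "my_table");
        String formatted = name.toString();

        // Parse: recover the individual identifiers from a formatted name.
        TableName parsed = TableName.parse(formatted);
        String project = parsed.getProject(); // "my-project"
        String dataset = parsed.getDataset(); // "my_dataset"
        String table = parsed.getTable();     // "my_table"
      }
    }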
+ *
+ * <p>This class can be customized by passing in a custom instance of
+ * MetastorePartitionServiceSettings to create(). For example:
+ *
+ * <p>To customize credentials:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceSettings metastorePartitionServiceSettings =
+ *     MetastorePartitionServiceSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create(metastorePartitionServiceSettings);
+ * }</pre>
+ * + *
<p>To customize the endpoint:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceSettings metastorePartitionServiceSettings =
+ *     MetastorePartitionServiceSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create(metastorePartitionServiceSettings);
+ * }</pre>
+ * + *
<p>
Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class MetastorePartitionServiceClient implements BackgroundResource { + private final MetastorePartitionServiceSettings settings; + private final MetastorePartitionServiceStub stub; + + /** Constructs an instance of MetastorePartitionServiceClient with default settings. */ + public static final MetastorePartitionServiceClient create() throws IOException { + return create(MetastorePartitionServiceSettings.newBuilder().build()); + } + + /** + * Constructs an instance of MetastorePartitionServiceClient, using the given settings. The + * channels are created based on the settings passed in, or defaults for any settings that are not + * set. + */ + public static final MetastorePartitionServiceClient create( + MetastorePartitionServiceSettings settings) throws IOException { + return new MetastorePartitionServiceClient(settings); + } + + /** + * Constructs an instance of MetastorePartitionServiceClient, using the given stub for making + * calls. This is for advanced usage - prefer using create(MetastorePartitionServiceSettings). + */ + public static final MetastorePartitionServiceClient create(MetastorePartitionServiceStub stub) { + return new MetastorePartitionServiceClient(stub); + } + + /** + * Constructs an instance of MetastorePartitionServiceClient, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected MetastorePartitionServiceClient(MetastorePartitionServiceSettings settings) + throws IOException { + this.settings = settings; + this.stub = ((MetastorePartitionServiceStubSettings) settings.getStubSettings()).createStub(); + } + + protected MetastorePartitionServiceClient(MetastorePartitionServiceStub stub) { + this.settings = null; + this.stub = stub; + } + + public final MetastorePartitionServiceSettings getSettings() { + return settings; + } + + public MetastorePartitionServiceStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds metastore partitions to a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchCreateMetastorePartitionsRequest request =
+   *       BatchCreateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList())
+   *           .setSkipExistingPartitions(true)
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   BatchCreateMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.batchCreateMetastorePartitions(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreateMetastorePartitionsResponse batchCreateMetastorePartitions( + BatchCreateMetastorePartitionsRequest request) { + return batchCreateMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds metastore partitions to a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchCreateMetastorePartitionsRequest request =
+   *       BatchCreateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList())
+   *           .setSkipExistingPartitions(true)
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<BatchCreateMetastorePartitionsResponse> future =
+   *       metastorePartitionServiceClient
+   *           .batchCreateMetastorePartitionsCallable()
+   *           .futureCall(request);
+   *   // Do something.
+   *   BatchCreateMetastorePartitionsResponse response = future.get();
+   * }
+   * }</pre>
+ */ + public final UnaryCallable< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsCallable() { + return stub.batchCreateMetastorePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes metastore partitions from a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchDeleteMetastorePartitionsRequest request =
+   *       BatchDeleteMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllPartitionValues(new ArrayList())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   metastorePartitionServiceClient.batchDeleteMetastorePartitions(request);
+   * }
+   * }</pre>
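Reviewer note (not part of the generated diff): batchDeleteMetastorePartitions returns void (proto Empty), so failures surface only as the com.google.api.gax.rpc.ApiException documented below. A hedged sketch continuing the sample's variables:

    try {
      metastorePartitionServiceClient.batchDeleteMetastorePartitions(request);
    } catch (ApiException e) {
      // getStatusCode() carries the canonical gRPC code, for example NOT_FOUND.
      System.err.println("Delete failed: " + e.getStatusCode().getCode());
    }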
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void batchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest request) { + batchDeleteMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes metastore partitions from a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchDeleteMetastorePartitionsRequest request =
+   *       BatchDeleteMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllPartitionValues(new ArrayList())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<Empty> future =
+   *       metastorePartitionServiceClient
+   *           .batchDeleteMetastorePartitionsCallable()
+   *           .futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<BatchDeleteMetastorePartitionsRequest, Empty>
+      batchDeleteMetastorePartitionsCallable() {
+    return stub.batchDeleteMetastorePartitionsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates metastore partitions in a table.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchUpdateMetastorePartitionsRequest request =
+   *       BatchUpdateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   BatchUpdateMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.batchUpdateMetastorePartitions(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchUpdateMetastorePartitionsResponse batchUpdateMetastorePartitions( + BatchUpdateMetastorePartitionsRequest request) { + return batchUpdateMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates metastore partitions in a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BatchUpdateMetastorePartitionsRequest request =
+   *       BatchUpdateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllRequests(new ArrayList())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<BatchUpdateMetastorePartitionsResponse> future =
+   *       metastorePartitionServiceClient
+   *           .batchUpdateMetastorePartitionsCallable()
+   *           .futureCall(request);
+   *   // Do something.
+   *   BatchUpdateMetastorePartitionsResponse response = future.get();
+   * }
+   * }</pre>
+ */ + public final UnaryCallable< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsCallable() { + return stub.batchUpdateMetastorePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+   *   ListMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.listMetastorePartitions(parent);
+   * }
+   * }</pre>
+ * + * @param parent Required. Reference to the table to which these metastore partitions belong, in + * the format of projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListMetastorePartitionsResponse listMetastorePartitions(TableName parent) { + ListMetastorePartitionsRequest request = + ListMetastorePartitionsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listMetastorePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
+   *   ListMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.listMetastorePartitions(parent);
+   * }
+   * }</pre>
+ * + * @param parent Required. Reference to the table to which these metastore partitions belong, in + * the format of projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListMetastorePartitionsResponse listMetastorePartitions(String parent) { + ListMetastorePartitionsRequest request = + ListMetastorePartitionsRequest.newBuilder().setParent(parent).build(); + return listMetastorePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   ListMetastorePartitionsRequest request =
+   *       ListMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ListMetastorePartitionsResponse response =
+   *       metastorePartitionServiceClient.listMetastorePartitions(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListMetastorePartitionsResponse listMetastorePartitions( + ListMetastorePartitionsRequest request) { + return listMetastorePartitionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metastore partitions from a table. + * + *
<p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   ListMetastorePartitionsRequest request =
+   *       ListMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   ApiFuture<ListMetastorePartitionsResponse> future =
+   *       metastorePartitionServiceClient.listMetastorePartitionsCallable().futureCall(request);
+   *   // Do something.
+   *   ListMetastorePartitionsResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListMetastorePartitionsRequest, ListMetastorePartitionsResponse>
+      listMetastorePartitionsCallable() {
+    return stub.listMetastorePartitionsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * This is a bi-di streaming rpc method that allows the client to send a stream of partitions and
+   * commit all of them atomically at the end. If the commit is successful, the server will return a
+   * response and close the stream. If the commit fails (due to duplicate partitions or other
+   * reason), the server will close the stream with an error. This method is only available via the
+   * gRPC API (not REST).
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+   *     MetastorePartitionServiceClient.create()) {
+   *   BidiStream<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> bidiStream =
+   *       metastorePartitionServiceClient.streamMetastorePartitionsCallable().call();
+   *   StreamMetastorePartitionsRequest request =
+   *       StreamMetastorePartitionsRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .addAllMetastorePartitions(new ArrayList())
+   *           .setSkipExistingPartitions(true)
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (StreamMetastorePartitionsResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }</pre>
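Reviewer note (not part of the generated diff): the generated sample sends one request and iterates responses but never half-closes the stream, even though the service commits atomically once the client finishes sending. A sketch of the likely full exchange, continuing the sample's variables and assuming the standard com.google.api.gax.rpc.BidiStream half-close method closeSend():

    BidiStream<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> bidiStream =
        metastorePartitionServiceClient.streamMetastorePartitionsCallable().call();
    bidiStream.send(request); // repeat for each batch of partitions
    bidiStream.closeSend();   // half-close: signals that no more partitions are coming
    for (StreamMetastorePartitionsResponse response : bidiStream) {
      // On success the server responds after committing, then closes the stream.
    }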
+ */ + public final BidiStreamingCallable< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsCallable() { + return stub.streamMetastorePartitionsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java new file mode 100644 index 000000000000..dba660ff05f2 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java @@ -0,0 +1,266 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta.stub.MetastorePartitionServiceStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link MetastorePartitionServiceClient}. + * + *
<p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of batchCreateMetastorePartitions:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceSettings.Builder metastorePartitionServiceSettingsBuilder =
+ *     MetastorePartitionServiceSettings.newBuilder();
+ * metastorePartitionServiceSettingsBuilder
+ *     .batchCreateMetastorePartitionsSettings()
+ *     .setRetrySettings(
+ *         metastorePartitionServiceSettingsBuilder
+ *             .batchCreateMetastorePartitionsSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * MetastorePartitionServiceSettings metastorePartitionServiceSettings =
+ *     metastorePartitionServiceSettingsBuilder.build();
+ * }</pre>
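Reviewer note (not part of the generated diff): beyond the per-method override shown above, the Builder's applyToAllUnaryMethods (defined later in this file) applies one change across every unary method. A minimal sketch capping all unary methods at three attempts; the updater receives a UnaryCallSettings.Builder<?, ?> and must return null (Void):

    MetastorePartitionServiceSettings.Builder builder =
        MetastorePartitionServiceSettings.newBuilder();
    builder.applyToAllUnaryMethods(
        callSettings -> {
          callSettings.setRetrySettings(
              callSettings.getRetrySettings().toBuilder().setMaxAttempts(3).build());
          return null;
        });
    MetastorePartitionServiceSettings settings = builder.build(); // throws IOException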
+ *
+ * Please refer to the [Client Side Retry
+ * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting
+ * retries.
+ */
+@BetaApi
+@Generated("by gapic-generator-java")
+public class MetastorePartitionServiceSettings
+    extends ClientSettings<MetastorePartitionServiceSettings> {
+
+  /** Returns the object with the settings used for calls to batchCreateMetastorePartitions. */
+  public UnaryCallSettings<
+          BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse>
+      batchCreateMetastorePartitionsSettings() {
+    return ((MetastorePartitionServiceStubSettings) getStubSettings())
+        .batchCreateMetastorePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to batchDeleteMetastorePartitions. */
+  public UnaryCallSettings<BatchDeleteMetastorePartitionsRequest, Empty>
+      batchDeleteMetastorePartitionsSettings() {
+    return ((MetastorePartitionServiceStubSettings) getStubSettings())
+        .batchDeleteMetastorePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to batchUpdateMetastorePartitions. */
+  public UnaryCallSettings<
+          BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse>
+      batchUpdateMetastorePartitionsSettings() {
+    return ((MetastorePartitionServiceStubSettings) getStubSettings())
+        .batchUpdateMetastorePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to listMetastorePartitions. */
+  public UnaryCallSettings<ListMetastorePartitionsRequest, ListMetastorePartitionsResponse>
+      listMetastorePartitionsSettings() {
+    return ((MetastorePartitionServiceStubSettings) getStubSettings())
+        .listMetastorePartitionsSettings();
+  }
+
+  /** Returns the object with the settings used for calls to streamMetastorePartitions. */
+  public StreamingCallSettings<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+      streamMetastorePartitionsSettings() {
+    return ((MetastorePartitionServiceStubSettings) getStubSettings())
+        .streamMetastorePartitionsSettings();
+  }
+
+  public static final MetastorePartitionServiceSettings create(
+      MetastorePartitionServiceStubSettings stub) throws IOException {
+    return new MetastorePartitionServiceSettings.Builder(stub.toBuilder()).build();
+  }
+
+  /** Returns a builder for the default ExecutorProvider for this service. */
+  public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() {
+    return MetastorePartitionServiceStubSettings.defaultExecutorProviderBuilder();
+  }
+
+  /** Returns the default service endpoint. */
+  public static String getDefaultEndpoint() {
+    return MetastorePartitionServiceStubSettings.getDefaultEndpoint();
+  }
+
+  /** Returns the default service scopes. */
+  public static List<String> getDefaultServiceScopes() {
+    return MetastorePartitionServiceStubSettings.getDefaultServiceScopes();
+  }
+
+  /** Returns a builder for the default credentials for this service. */
+  public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() {
+    return MetastorePartitionServiceStubSettings.defaultCredentialsProviderBuilder();
+  }
+
+  /** Returns a builder for the default ChannelProvider for this service. */
+  public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() {
+    return MetastorePartitionServiceStubSettings.defaultGrpcTransportProviderBuilder();
+  }
+
+  public static TransportChannelProvider defaultTransportChannelProvider() {
+    return MetastorePartitionServiceStubSettings.defaultTransportChannelProvider();
+  }
+
+  public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() {
+    return MetastorePartitionServiceStubSettings.defaultApiClientHeaderProviderBuilder();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder() {
+    return Builder.createDefault();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder(ClientContext clientContext) {
+    return new Builder(clientContext);
+  }
+
+  /** Returns a builder containing all the values of this settings class. */
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  protected MetastorePartitionServiceSettings(Builder settingsBuilder) throws IOException {
+    super(settingsBuilder);
+  }
+
+  /** Builder for MetastorePartitionServiceSettings. */
+  public static class Builder
+      extends ClientSettings.Builder<MetastorePartitionServiceSettings, Builder> {
+
+    protected Builder() throws IOException {
+      this(((ClientContext) null));
+    }
+
+    protected Builder(ClientContext clientContext) {
+      super(MetastorePartitionServiceStubSettings.newBuilder(clientContext));
+    }
+
+    protected Builder(MetastorePartitionServiceSettings settings) {
+      super(settings.getStubSettings().toBuilder());
+    }
+
+    protected Builder(MetastorePartitionServiceStubSettings.Builder stubSettings) {
+      super(stubSettings);
+    }
+
+    private static Builder createDefault() {
+      return new Builder(MetastorePartitionServiceStubSettings.newBuilder());
+    }
+
+    public MetastorePartitionServiceStubSettings.Builder getStubSettingsBuilder() {
+      return ((MetastorePartitionServiceStubSettings.Builder) getStubSettings());
+    }
+
+    /**
+     * Applies the given settings updater function to all of the unary API methods in this service.
+     *
+     * <p>Note: This method does not support applying settings to streaming methods.
+     */
+    public Builder applyToAllUnaryMethods(
+        ApiFunction<UnaryCallSettings.Builder<?, ?>, Void> settingsUpdater) {
+      super.applyToAllUnaryMethods(
+          getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater);
+      return this;
+    }
+
+    /** Returns the builder for the settings used for calls to batchCreateMetastorePartitions. */
+    public UnaryCallSettings.Builder<
+            BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse>
+        batchCreateMetastorePartitionsSettings() {
+      return getStubSettingsBuilder().batchCreateMetastorePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to batchDeleteMetastorePartitions. */
+    public UnaryCallSettings.Builder<BatchDeleteMetastorePartitionsRequest, Empty>
+        batchDeleteMetastorePartitionsSettings() {
+      return getStubSettingsBuilder().batchDeleteMetastorePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to batchUpdateMetastorePartitions. */
+    public UnaryCallSettings.Builder<
+            BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse>
+        batchUpdateMetastorePartitionsSettings() {
+      return getStubSettingsBuilder().batchUpdateMetastorePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to listMetastorePartitions. */
+    public UnaryCallSettings.Builder<
+            ListMetastorePartitionsRequest, ListMetastorePartitionsResponse>
+        listMetastorePartitionsSettings() {
+      return getStubSettingsBuilder().listMetastorePartitionsSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to streamMetastorePartitions. */
+    public StreamingCallSettings.Builder<
+            StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+        streamMetastorePartitionsSettings() {
+      return getStubSettingsBuilder().streamMetastorePartitionsSettings();
+    }
+
+    @Override
+    public MetastorePartitionServiceSettings build() throws IOException {
+      return new MetastorePartitionServiceSettings(this);
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/gapic_metadata.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/gapic_metadata.json
new file mode 100644
index 000000000000..60151bde7f69
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/gapic_metadata.json
@@ -0,0 +1,33 @@
+{
+  "schema": "1.0",
+  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+  "language": "java",
+  "protoPackage": "google.cloud.bigquery.storage.v1beta",
+  "libraryPackage": "com.google.cloud.bigquery.storage.v1beta",
+  "services": {
+    "MetastorePartitionService": {
+      "clients": {
+        "grpc": {
+          "libraryClient": "MetastorePartitionServiceClient",
+          "rpcs": {
+            "BatchCreateMetastorePartitions": {
+              "methods": ["batchCreateMetastorePartitions", "batchCreateMetastorePartitionsCallable"]
+            },
+            "BatchDeleteMetastorePartitions": {
+              "methods": ["batchDeleteMetastorePartitions", "batchDeleteMetastorePartitionsCallable"]
+            },
+            "BatchUpdateMetastorePartitions": {
+              "methods": ["batchUpdateMetastorePartitions", "batchUpdateMetastorePartitionsCallable"]
+            },
+            "ListMetastorePartitions": {
+              "methods": ["listMetastorePartitions", "listMetastorePartitions", "listMetastorePartitions", "listMetastorePartitionsCallable"]
+            },
+            "StreamMetastorePartitions": {
+              "methods": ["streamMetastorePartitionsCallable"]
+            }
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java
new file mode 100644
index 000000000000..e8bba016cc6b
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A client to BigQuery Storage API
+ *
+ * <p>The interfaces provided are listed below, along with usage samples.
+ *
+ * <p>======================= MetastorePartitionServiceClient =======================
+ *
+ * <p>Service Description: BigQuery Metastore Partition Service API. This service is used for
+ * managing metastore partitions in BigQuery metastore. The service supports only batch operations
+ * for write.
+ *
+ * <p>Sample for MetastorePartitionServiceClient:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create()) {
+ *   BatchCreateMetastorePartitionsRequest request =
+ *       BatchCreateMetastorePartitionsRequest.newBuilder()
+ *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+ *           .addAllRequests(new ArrayList())
+ *           .setSkipExistingPartitions(true)
+ *           .setTraceId("traceId-1067401920")
+ *           .build();
+ *   BatchCreateMetastorePartitionsResponse response =
+ *       metastorePartitionServiceClient.batchCreateMetastorePartitions(request);
+ * }
+ * }</pre>
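+ *
+ * <p>Editor's note, illustrative sketch only (not generated output): StreamMetastorePartitions is
+ * exposed only through streamMetastorePartitionsCallable(), so the bidirectional stream is driven
+ * manually. This assumes the gax BidiStream contract; request fields are placeholders.
+ *
+ * <pre>{@code
+ * try (MetastorePartitionServiceClient metastorePartitionServiceClient =
+ *     MetastorePartitionServiceClient.create()) {
+ *   BidiStream<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> bidiStream =
+ *       metastorePartitionServiceClient.streamMetastorePartitionsCallable().call();
+ *   StreamMetastorePartitionsRequest request =
+ *       StreamMetastorePartitionsRequest.newBuilder()
+ *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+ *           .build();
+ *   bidiStream.send(request);
+ *   bidiStream.closeSend();
+ *   for (StreamMetastorePartitionsResponse response : bidiStream) {
+ *     // Handle each streamed response.
+ *   }
+ * }
+ * }</pre>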
+ */
+@Generated("by gapic-generator-java")
+package com.google.cloud.bigquery.storage.v1beta;
+
+import javax.annotation.Generated;
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java
new file mode 100644
index 000000000000..e23f847dd55d
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1beta.stub;
+
+import com.google.api.core.BetaApi;
+import com.google.api.gax.grpc.GrpcCallSettings;
+import com.google.api.gax.grpc.GrpcCallableFactory;
+import com.google.api.gax.grpc.GrpcStubCallableFactory;
+import com.google.api.gax.rpc.BatchingCallSettings;
+import com.google.api.gax.rpc.BidiStreamingCallable;
+import com.google.api.gax.rpc.ClientContext;
+import com.google.api.gax.rpc.ClientStreamingCallable;
+import com.google.api.gax.rpc.OperationCallSettings;
+import com.google.api.gax.rpc.OperationCallable;
+import com.google.api.gax.rpc.PagedCallSettings;
+import com.google.api.gax.rpc.ServerStreamingCallSettings;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.api.gax.rpc.StreamingCallSettings;
+import com.google.api.gax.rpc.UnaryCallSettings;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.longrunning.Operation;
+import com.google.longrunning.stub.OperationsStub;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+/**
+ * gRPC callable factory implementation for the MetastorePartitionService service API.
+ *
+ * <p>This class is for advanced usage.
+ */
+@BetaApi
+@Generated("by gapic-generator-java")
+public class GrpcMetastorePartitionServiceCallableFactory implements GrpcStubCallableFactory {
+
+  @Override
+  public <RequestT, ResponseT> UnaryCallable<RequestT, ResponseT> createUnaryCallable(
+      GrpcCallSettings<RequestT, ResponseT> grpcCallSettings,
+      UnaryCallSettings<RequestT, ResponseT> callSettings,
+      ClientContext clientContext) {
+    return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext);
+  }
+
+  @Override
+  public <RequestT, ResponseT, PagedListResponseT>
+      UnaryCallable<RequestT, PagedListResponseT> createPagedCallable(
+          GrpcCallSettings<RequestT, ResponseT> grpcCallSettings,
+          PagedCallSettings<RequestT, ResponseT, PagedListResponseT> callSettings,
+          ClientContext clientContext) {
+    return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext);
+  }
+
+  @Override
+  public <RequestT, ResponseT> UnaryCallable<RequestT, ResponseT> createBatchingCallable(
+      GrpcCallSettings<RequestT, ResponseT> grpcCallSettings,
+      BatchingCallSettings<RequestT, ResponseT> callSettings,
+      ClientContext clientContext) {
+    return GrpcCallableFactory.createBatchingCallable(
+        grpcCallSettings, callSettings, clientContext);
+  }
+
+  @Override
+  public <RequestT, ResponseT, MetadataT>
+      OperationCallable<RequestT, ResponseT, MetadataT> createOperationCallable(
+          GrpcCallSettings<RequestT, Operation> grpcCallSettings,
+          OperationCallSettings<RequestT, ResponseT, MetadataT> callSettings,
+          ClientContext clientContext,
+          OperationsStub operationsStub) {
+    return GrpcCallableFactory.createOperationCallable(
+        grpcCallSettings, callSettings, clientContext, operationsStub);
+  }
+
+  @Override
+  public <RequestT, ResponseT>
+      BidiStreamingCallable<RequestT, ResponseT> createBidiStreamingCallable(
+          GrpcCallSettings<RequestT, ResponseT> grpcCallSettings,
+          StreamingCallSettings<RequestT, ResponseT> callSettings,
+          ClientContext clientContext) {
+    return GrpcCallableFactory.createBidiStreamingCallable(
+        grpcCallSettings, callSettings, clientContext);
+  }
+
+  @Override
+  public <RequestT, ResponseT>
+      ServerStreamingCallable<RequestT, ResponseT> createServerStreamingCallable(
+          GrpcCallSettings<RequestT, ResponseT> grpcCallSettings,
+          ServerStreamingCallSettings<RequestT, ResponseT> callSettings,
+          ClientContext clientContext) {
+    return GrpcCallableFactory.createServerStreamingCallable(
+        grpcCallSettings, callSettings, clientContext);
+  }
+
+  @Override
+  public <RequestT, ResponseT>
+      ClientStreamingCallable<RequestT, ResponseT> createClientStreamingCallable(
+          GrpcCallSettings<RequestT, ResponseT> grpcCallSettings,
+          StreamingCallSettings<RequestT, ResponseT> callSettings,
+          ClientContext clientContext) {
+    return GrpcCallableFactory.createClientStreamingCallable(
+        grpcCallSettings, callSettings, clientContext);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java
new file mode 100644
index 000000000000..e1f32e936248
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1beta.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the MetastorePartitionService service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcMetastorePartitionServiceStub extends MetastorePartitionServiceStub { + private static final MethodDescriptor< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta.MetastorePartitionService/BatchCreateMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCreateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller( + BatchCreateMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + batchDeleteMetastorePartitionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta.MetastorePartitionService/BatchDeleteMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchDeleteMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta.MetastorePartitionService/BatchUpdateMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchUpdateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller( + BatchUpdateMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta.MetastorePartitionService/ListMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(ListMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta.MetastorePartitionService/StreamMetastorePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(StreamMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(StreamMetastorePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsCallable; + private final UnaryCallable + batchDeleteMetastorePartitionsCallable; + private final UnaryCallable< + 
BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsCallable; + private final UnaryCallable + listMetastorePartitionsCallable; + private final BidiStreamingCallable< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcMetastorePartitionServiceStub create( + MetastorePartitionServiceStubSettings settings) throws IOException { + return new GrpcMetastorePartitionServiceStub(settings, ClientContext.create(settings)); + } + + public static final GrpcMetastorePartitionServiceStub create(ClientContext clientContext) + throws IOException { + return new GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcMetastorePartitionServiceStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcMetastorePartitionServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcMetastorePartitionServiceCallableFactory()); + } + + /** + * Constructs an instance of GrpcMetastorePartitionServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcMetastorePartitionServiceStub( + MetastorePartitionServiceStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + batchCreateMetastorePartitionsTransportSettings = + GrpcCallSettings + . + newBuilder() + .setMethodDescriptor(batchCreateMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchDeleteMetastorePartitionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(batchDeleteMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + batchUpdateMetastorePartitionsTransportSettings = + GrpcCallSettings + . 
+ newBuilder() + .setMethodDescriptor(batchUpdateMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listMetastorePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(listMetastorePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings + streamMetastorePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(streamMetastorePartitionsMethodDescriptor) + .build(); + + this.batchCreateMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + batchCreateMetastorePartitionsTransportSettings, + settings.batchCreateMetastorePartitionsSettings(), + clientContext); + this.batchDeleteMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + batchDeleteMetastorePartitionsTransportSettings, + settings.batchDeleteMetastorePartitionsSettings(), + clientContext); + this.batchUpdateMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + batchUpdateMetastorePartitionsTransportSettings, + settings.batchUpdateMetastorePartitionsSettings(), + clientContext); + this.listMetastorePartitionsCallable = + callableFactory.createUnaryCallable( + listMetastorePartitionsTransportSettings, + settings.listMetastorePartitionsSettings(), + clientContext); + this.streamMetastorePartitionsCallable = + callableFactory.createBidiStreamingCallable( + streamMetastorePartitionsTransportSettings, + settings.streamMetastorePartitionsSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsCallable() { + return batchCreateMetastorePartitionsCallable; + } + + @Override + public UnaryCallable + batchDeleteMetastorePartitionsCallable() { + return batchDeleteMetastorePartitionsCallable; + } + + @Override + public UnaryCallable< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsCallable() { + return batchUpdateMetastorePartitionsCallable; + } + + @Override + public UnaryCallable + listMetastorePartitionsCallable() { + return listMetastorePartitionsCallable; + } + + @Override + public BidiStreamingCallable + streamMetastorePartitionsCallable() { + return streamMetastorePartitionsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws 
+      InterruptedException {
+    return backgroundResources.awaitTermination(duration, unit);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java
new file mode 100644
index 000000000000..db9a876a64ee
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1beta.stub;
+
+import com.google.api.core.BetaApi;
+import com.google.api.gax.core.BackgroundResource;
+import com.google.api.gax.rpc.BidiStreamingCallable;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest;
+import com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse;
+import com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest;
+import com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest;
+import com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse;
+import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest;
+import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse;
+import com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest;
+import com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse;
+import com.google.protobuf.Empty;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+/**
+ * Base stub class for the MetastorePartitionService service API.
+ *
+ * <p>This class is for advanced usage and reflects the underlying API directly.
+ */
+@BetaApi
+@Generated("by gapic-generator-java")
+public abstract class MetastorePartitionServiceStub implements BackgroundResource {
+
+  public UnaryCallable<
+          BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse>
+      batchCreateMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: batchCreateMetastorePartitionsCallable()");
+  }
+
+  public UnaryCallable<BatchDeleteMetastorePartitionsRequest, Empty>
+      batchDeleteMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: batchDeleteMetastorePartitionsCallable()");
+  }
+
+  public UnaryCallable<
+          BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse>
+      batchUpdateMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: batchUpdateMetastorePartitionsCallable()");
+  }
+
+  public UnaryCallable<ListMetastorePartitionsRequest, ListMetastorePartitionsResponse>
+      listMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException("Not implemented: listMetastorePartitionsCallable()");
+  }
+
+  public BidiStreamingCallable<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+      streamMetastorePartitionsCallable() {
+    throw new UnsupportedOperationException(
+        "Not implemented: streamMetastorePartitionsCallable()");
+  }
+
+  @Override
+  public abstract void close();
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java
new file mode 100644
index 000000000000..96cd9e15fd4f
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1beta.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse; +import com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest; +import com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link MetastorePartitionServiceStub}. + * + *
+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of batchCreateMetastorePartitions:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * MetastorePartitionServiceStubSettings.Builder metastorePartitionServiceSettingsBuilder =
+ *     MetastorePartitionServiceStubSettings.newBuilder();
+ * metastorePartitionServiceSettingsBuilder
+ *     .batchCreateMetastorePartitionsSettings()
+ *     .setRetrySettings(
+ *         metastorePartitionServiceSettingsBuilder
+ *             .batchCreateMetastorePartitionsSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * MetastorePartitionServiceStubSettings metastorePartitionServiceSettings =
+ *     metastorePartitionServiceSettingsBuilder.build();
+ * }</pre>
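+ *
+ * <p>Editor's note, illustrative sketch only (not generated output): the same builder can apply
+ * one update across every unary method at once; the retryable codes below are arbitrary
+ * placeholders.
+ *
+ * <pre>{@code
+ * metastorePartitionServiceSettingsBuilder.applyToAllUnaryMethods(
+ *     unaryCallSettingsBuilder -> {
+ *       unaryCallSettingsBuilder.setRetryableCodes(
+ *           StatusCode.Code.UNAVAILABLE, StatusCode.Code.DEADLINE_EXCEEDED);
+ *       return null;
+ *     });
+ * }</pre>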
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class MetastorePartitionServiceStubSettings + extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings; + private final UnaryCallSettings + batchDeleteMetastorePartitionsSettings; + private final UnaryCallSettings< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings; + private final UnaryCallSettings + listMetastorePartitionsSettings; + private final StreamingCallSettings< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsSettings; + + /** Returns the object with the settings used for calls to batchCreateMetastorePartitions. */ + public UnaryCallSettings< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings() { + return batchCreateMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to batchDeleteMetastorePartitions. */ + public UnaryCallSettings + batchDeleteMetastorePartitionsSettings() { + return batchDeleteMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to batchUpdateMetastorePartitions. */ + public UnaryCallSettings< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings() { + return batchUpdateMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to listMetastorePartitions. */ + public UnaryCallSettings + listMetastorePartitionsSettings() { + return listMetastorePartitionsSettings; + } + + /** Returns the object with the settings used for calls to streamMetastorePartitions. */ + public StreamingCallSettings + streamMetastorePartitionsSettings() { + return streamMetastorePartitionsSettings; + } + + public MetastorePartitionServiceStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcMetastorePartitionServiceStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. 
*/ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(MetastorePartitionServiceStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected MetastorePartitionServiceStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + batchCreateMetastorePartitionsSettings = + settingsBuilder.batchCreateMetastorePartitionsSettings().build(); + batchDeleteMetastorePartitionsSettings = + settingsBuilder.batchDeleteMetastorePartitionsSettings().build(); + batchUpdateMetastorePartitionsSettings = + settingsBuilder.batchUpdateMetastorePartitionsSettings().build(); + listMetastorePartitionsSettings = settingsBuilder.listMetastorePartitionsSettings().build(); + streamMetastorePartitionsSettings = settingsBuilder.streamMetastorePartitionsSettings().build(); + } + + /** Builder for MetastorePartitionServiceStubSettings. 
*/ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder< + BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitionsSettings; + private final UnaryCallSettings.Builder + batchDeleteMetastorePartitionsSettings; + private final UnaryCallSettings.Builder< + BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitionsSettings; + private final UnaryCallSettings.Builder< + ListMetastorePartitionsRequest, ListMetastorePartitionsResponse> + listMetastorePartitionsSettings; + private final StreamingCallSettings.Builder< + StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse> + streamMetastorePartitionsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "no_retry_1_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setTotalTimeoutDuration(Duration.ofMillis(240000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(240000L)) + .setTotalTimeoutDuration(Duration.ofMillis(240000L)) + .build(); + definitions.put("no_retry_1_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + batchCreateMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchDeleteMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchUpdateMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listMetastorePartitionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + streamMetastorePartitionsSettings = StreamingCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + batchCreateMetastorePartitionsSettings, + batchDeleteMetastorePartitionsSettings, + batchUpdateMetastorePartitionsSettings, + listMetastorePartitionsSettings); + initDefaults(this); + } + + protected Builder(MetastorePartitionServiceStubSettings settings) { + super(settings); + + batchCreateMetastorePartitionsSettings = + settings.batchCreateMetastorePartitionsSettings.toBuilder(); + batchDeleteMetastorePartitionsSettings = + settings.batchDeleteMetastorePartitionsSettings.toBuilder(); + batchUpdateMetastorePartitionsSettings = + settings.batchUpdateMetastorePartitionsSettings.toBuilder(); + listMetastorePartitionsSettings = settings.listMetastorePartitionsSettings.toBuilder(); + 
streamMetastorePartitionsSettings = settings.streamMetastorePartitionsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + batchCreateMetastorePartitionsSettings, + batchDeleteMetastorePartitionsSettings, + batchUpdateMetastorePartitionsSettings, + listMetastorePartitionsSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .batchCreateMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchDeleteMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .batchUpdateMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listMetastorePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
+     * <p>Note: This method does not support applying settings to streaming methods.
+     */
+    public Builder applyToAllUnaryMethods(
+        ApiFunction<UnaryCallSettings.Builder<?, ?>, Void> settingsUpdater) {
+      super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater);
+      return this;
+    }
+
+    public ImmutableList<UnaryCallSettings.Builder<?, ?>> unaryMethodSettingsBuilders() {
+      return unaryMethodSettingsBuilders;
+    }
+
+    /** Returns the builder for the settings used for calls to batchCreateMetastorePartitions. */
+    public UnaryCallSettings.Builder<
+            BatchCreateMetastorePartitionsRequest, BatchCreateMetastorePartitionsResponse>
+        batchCreateMetastorePartitionsSettings() {
+      return batchCreateMetastorePartitionsSettings;
+    }
+
+    /** Returns the builder for the settings used for calls to batchDeleteMetastorePartitions. */
+    public UnaryCallSettings.Builder<BatchDeleteMetastorePartitionsRequest, Empty>
+        batchDeleteMetastorePartitionsSettings() {
+      return batchDeleteMetastorePartitionsSettings;
+    }
+
+    /** Returns the builder for the settings used for calls to batchUpdateMetastorePartitions. */
+    public UnaryCallSettings.Builder<
+            BatchUpdateMetastorePartitionsRequest, BatchUpdateMetastorePartitionsResponse>
+        batchUpdateMetastorePartitionsSettings() {
+      return batchUpdateMetastorePartitionsSettings;
+    }
+
+    /** Returns the builder for the settings used for calls to listMetastorePartitions. */
+    public UnaryCallSettings.Builder<
+            ListMetastorePartitionsRequest, ListMetastorePartitionsResponse>
+        listMetastorePartitionsSettings() {
+      return listMetastorePartitionsSettings;
+    }
+
+    /** Returns the builder for the settings used for calls to streamMetastorePartitions. */
+    public StreamingCallSettings.Builder<
+            StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+        streamMetastorePartitionsSettings() {
+      return streamMetastorePartitionsSettings;
+    }
+
+    @Override
+    public MetastorePartitionServiceStubSettings build() throws IOException {
+      return new MetastorePartitionServiceStubSettings(this);
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java
new file mode 100644
index 000000000000..70b26e22755a
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java
@@ -0,0 +1,838 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1beta1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta1.stub.BigQueryStorageStub; +import com.google.cloud.bigquery.storage.v1beta1.stub.BigQueryStorageStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery storage API. + * + *
+ * <p>The BigQuery storage API can be used to read data stored in BigQuery.
+ *
+ * <p>The v1beta1 API is not yet officially deprecated, and will go through a full deprecation cycle
+ * (https://cloud.google.com/products#product-launch-stages) before the service is turned down.
+ * However, new code should use the v1 API going forward.
+ *
+ * <p>This class provides the ability to make remote calls to the backing service through method
+ * calls that map to API methods. Sample code to get started:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+ *   TableReferenceProto.TableReference tableReference =
+ *       TableReferenceProto.TableReference.newBuilder().build();
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   int requestedStreams = 1017221410;
+ *   Storage.ReadSession response =
+ *       baseBigQueryStorageClient.createReadSession(tableReference, parent, requestedStreams);
+ * }
+ * }</pre>
+ * + *
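+ * <p>Editor's note, illustrative sketch only (not generated output): when try-with-resources is
+ * not practical, close the client explicitly in a finally block.
+ *
+ * <pre>{@code
+ * BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create();
+ * try {
+ *   // ... issue calls ...
+ * } finally {
+ *   baseBigQueryStorageClient.close();
+ * }
+ * }</pre>
+ *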
+ * <p>Note: close() needs to be called on the BaseBigQueryStorageClient object to clean up resources
+ * such as threads. In the example above, try-with-resources is used, which automatically calls
+ * close().
+ *
+ * <table>
+ *    <caption>Methods</caption>
+ *    <tr>
+ *      <th>Method</th>
+ *      <th>Description</th>
+ *      <th>Method Variants</th>
+ *    </tr>
+ *    <tr>
+ *      <td><p> CreateReadSession</td>
+ *      <td><p> Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned.
+ * <p> A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read.
+ * <p> Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller.</td>
+ *      <td>
+ *      <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.</p>
+ *      <ul>
+ *           <li><p> createReadSession(TableReferenceProto.TableReference tableReference, ProjectName parent, int requestedStreams)
+ *           <li><p> createReadSession(TableReferenceProto.TableReference tableReference, String parent, int requestedStreams)
+ *           <li><p> createReadSession(Storage.CreateReadSessionRequest request)
+ *      </ul>
+ *      <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *      <ul>
+ *           <li><p> createReadSessionCallable()
+ *      </ul>
+ *       </td>
+ *    </tr>
+ *    <tr>
+ *      <td><p> ReadRows</td>
+ *      <td><p> Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail.
+ * <p> Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data.</td>
+ *      <td>
+ *      <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *      <ul>
+ *           <li><p> readRowsCallable()
+ *      </ul>
+ *       </td>
+ *    </tr>
+ *    <tr>
+ *      <td><p> BatchCreateReadSessionStreams</td>
+ *      <td><p> Creates additional streams for a ReadSession. This API can be used to dynamically adjust the parallelism of a batch processing task upwards by adding additional workers.</td>
+ *      <td>
+ *      <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.</p>
+ *      <ul>
+ *           <li><p> batchCreateReadSessionStreams(Storage.ReadSession session, int requestedStreams)
+ *           <li><p> batchCreateReadSessionStreams(Storage.BatchCreateReadSessionStreamsRequest request)
+ *      </ul>
+ *      <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *      <ul>
+ *           <li><p> batchCreateReadSessionStreamsCallable()
+ *      </ul>
+ *       </td>
+ *    </tr>
+ *    <tr>
+ *      <td><p> FinalizeStream</td>
+ *      <td><p> Causes a single stream in a ReadSession to gracefully stop. This API can be used to dynamically adjust the parallelism of a batch processing task downwards without losing data.
+ * <p> This API does not delete the stream -- it remains visible in the ReadSession, and any data processed by the stream is not released to other streams. However, no additional data will be assigned to the stream once this call completes. Callers must continue reading data on the stream until the end of the stream is reached so that data which has already been assigned to the stream will be processed.
+ * <p> This method will return an error if there are no other live streams in the Session, or if SplitReadStream() has been called on the given Stream.</td>
+ *      <td>
+ *      <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.</p>
+ *      <ul>
+ *           <li><p> finalizeStream(Storage.FinalizeStreamRequest request)
+ *      </ul>
+ *      <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.</p>
+ *      <ul>
+ *           <li><p> finalizeStream(Storage.Stream stream)
+ *      </ul>
+ *      <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *      <ul>
+ *           <li><p> finalizeStreamCallable()
+ *      </ul>
+ *       </td>
+ *    </tr>
+ *    <tr>
+ *      <td><p> SplitReadStream</td>
+ *      <td><p> Splits a given read stream into two Streams. These streams are referred to as the primary and the residual of the split. The original stream can still be read from in the same manner as before. Both of the returned streams can also be read from, and the total rows return by both child streams will be the same as the rows read from the original stream.
+ * <p> Moreover, the two child streams will be allocated back to back in the original Stream. Concretely, it is guaranteed that for streams Original, Primary, and Residual, that Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read to completion.
+ * <p> This method is guaranteed to be idempotent.</td>
+ *      <td>
+ *      <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.</p>
+ *      <ul>
+ *           <li><p> splitReadStream(Storage.SplitReadStreamRequest request)
+ *      </ul>
+ *      <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.</p>
+ *      <ul>
+ *           <li><p> splitReadStream(Storage.Stream originalStream)
+ *      </ul>
+ *      <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *      <ul>
+ *           <li><p> splitReadStreamCallable()
+ *      </ul>
+ *       </td>
+ *    </tr>
+ *  </table>
+ *
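+ * <p>Editor's note, illustrative sketch only (not generated output): a typical read flow chains
+ * CreateReadSession and ReadRows. Project, dataset, and table IDs are placeholders.
+ *
+ * <pre>{@code
+ * try (BaseBigQueryStorageClient client = BaseBigQueryStorageClient.create()) {
+ *   TableReferenceProto.TableReference tableReference =
+ *       TableReferenceProto.TableReference.newBuilder()
+ *           .setProjectId("[PROJECT]")
+ *           .setDatasetId("[DATASET]")
+ *           .setTableId("[TABLE]")
+ *           .build();
+ *   Storage.ReadSession session =
+ *       client.createReadSession(tableReference, ProjectName.of("[PROJECT]"), 1);
+ *   Storage.ReadRowsRequest readRowsRequest =
+ *       Storage.ReadRowsRequest.newBuilder()
+ *           .setReadPosition(
+ *               Storage.StreamPosition.newBuilder().setStream(session.getStreams(0)).build())
+ *           .build();
+ *   for (Storage.ReadRowsResponse response : client.readRowsCallable().call(readRowsRequest)) {
+ *     // Process each batch of rows as it arrives.
+ *   }
+ * }
+ * }</pre>
+ *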
+ * <p>See the individual methods for example code.
+ *
+ * <p>Many parameters require resource names to be formatted in a particular way. To assist with
+ * these names, this class includes a format method for each type of name, and additionally a parse
+ * method to extract the individual identifiers contained within names that are returned.
+ *
+ * <p>This class can be customized by passing in a custom instance of BaseBigQueryStorageSettings to
+ * create(). For example:
+ *
+ * <p>To customize credentials:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryStorageSettings baseBigQueryStorageSettings =
+ *     BaseBigQueryStorageSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BaseBigQueryStorageClient baseBigQueryStorageClient =
+ *     BaseBigQueryStorageClient.create(baseBigQueryStorageSettings);
+ * }</pre>
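+ *
+ * <p>Editor's note, illustrative sketch only (not generated output): myCredentials can come from a
+ * service account key file via the google-auth-library GoogleCredentials API; the path is a
+ * placeholder.
+ *
+ * <pre>{@code
+ * GoogleCredentials myCredentials =
+ *     GoogleCredentials.fromStream(new FileInputStream("/path/to/key.json"))
+ *         .createScoped(BaseBigQueryStorageSettings.getDefaultServiceScopes());
+ * }</pre>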
+ * + *
+ * <p>To customize the endpoint:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryStorageSettings baseBigQueryStorageSettings =
+ *     BaseBigQueryStorageSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BaseBigQueryStorageClient baseBigQueryStorageClient =
+ *     BaseBigQueryStorageClient.create(baseBigQueryStorageSettings);
+ * }</pre>
+ *
Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class BaseBigQueryStorageClient implements BackgroundResource { + private final BaseBigQueryStorageSettings settings; + private final BigQueryStorageStub stub; + + /** Constructs an instance of BaseBigQueryStorageClient with default settings. */ + public static final BaseBigQueryStorageClient create() throws IOException { + return create(BaseBigQueryStorageSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BaseBigQueryStorageClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BaseBigQueryStorageClient create(BaseBigQueryStorageSettings settings) + throws IOException { + return new BaseBigQueryStorageClient(settings); + } + + /** + * Constructs an instance of BaseBigQueryStorageClient, using the given stub for making calls. + * This is for advanced usage - prefer using create(BaseBigQueryStorageSettings). + */ + public static final BaseBigQueryStorageClient create(BigQueryStorageStub stub) { + return new BaseBigQueryStorageClient(stub); + } + + /** + * Constructs an instance of BaseBigQueryStorageClient, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected BaseBigQueryStorageClient(BaseBigQueryStorageSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryStorageStubSettings) settings.getStubSettings()).createStub(); + } + + protected BaseBigQueryStorageClient(BigQueryStorageStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BaseBigQueryStorageSettings getSettings() { + return settings; + } + + public BigQueryStorageStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 6 hours after they are created and do not require manual
+   * clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   TableReferenceProto.TableReference tableReference =
+   *       TableReferenceProto.TableReference.newBuilder().build();
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   int requestedStreams = 1017221410;
+   *   Storage.ReadSession response =
+   *       baseBigQueryStorageClient.createReadSession(tableReference, parent, requestedStreams);
+   * }
+   * }</pre>
+   *
+   * @param tableReference Required. Reference to the table to read.
+   * @param parent Required. String of the form `projects/{project_id}` indicating the project this
+   *     ReadSession is associated with. This is the project that will be billed for usage.
+   * @param requestedStreams Initial number of streams. If unset or 0, we will provide a value of
+   *     streams so as to produce reasonable throughput. Must be non-negative. The number of
+   *     streams may be lower than the requested number, depending on the amount of parallelism
+   *     that is reasonable for the table and the maximum amount of parallelism allowed by the
+   *     system.
+   *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Storage.ReadSession createReadSession( + TableReferenceProto.TableReference tableReference, ProjectName parent, int requestedStreams) { + Storage.CreateReadSessionRequest request = + Storage.CreateReadSessionRequest.newBuilder() + .setTableReference(tableReference) + .setParent(parent == null ? null : parent.toString()) + .setRequestedStreams(requestedStreams) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 6 hours after they are created and do not require manual
+   * clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   TableReferenceProto.TableReference tableReference =
+   *       TableReferenceProto.TableReference.newBuilder().build();
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   int requestedStreams = 1017221410;
+   *   Storage.ReadSession response =
+   *       baseBigQueryStorageClient.createReadSession(tableReference, parent, requestedStreams);
+   * }
+   * }</pre>
+   *
+   * @param tableReference Required. Reference to the table to read.
+   * @param parent Required. String of the form `projects/{project_id}` indicating the project this
+   *     ReadSession is associated with. This is the project that will be billed for usage.
+   * @param requestedStreams Initial number of streams. If unset or 0, we will provide a value of
+   *     streams so as to produce reasonable throughput. Must be non-negative. The number of
+   *     streams may be lower than the requested number, depending on the amount of parallelism
+   *     that is reasonable for the table and the maximum amount of parallelism allowed by the
+   *     system.
+   *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Storage.ReadSession createReadSession( + TableReferenceProto.TableReference tableReference, String parent, int requestedStreams) { + Storage.CreateReadSessionRequest request = + Storage.CreateReadSessionRequest.newBuilder() + .setTableReference(tableReference) + .setParent(parent) + .setRequestedStreams(requestedStreams) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 6 hours after they are created and do not require manual
+   * clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.CreateReadSessionRequest request =
+   *       Storage.CreateReadSessionRequest.newBuilder()
+   *           .setTableReference(TableReferenceProto.TableReference.newBuilder().build())
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
+   *           .setRequestedStreams(1017221410)
+   *           .setReadOptions(ReadOptions.TableReadOptions.newBuilder().build())
+   *           .setFormat(Storage.DataFormat.forNumber(0))
+   *           .setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
+   *           .build();
+   *   Storage.ReadSession response = baseBigQueryStorageClient.createReadSession(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Storage.ReadSession createReadSession(Storage.CreateReadSessionRequest request) { + return createReadSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 6 hours after they are created and do not require manual
+   * clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.CreateReadSessionRequest request =
+   *       Storage.CreateReadSessionRequest.newBuilder()
+   *           .setTableReference(TableReferenceProto.TableReference.newBuilder().build())
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
+   *           .setRequestedStreams(1017221410)
+   *           .setReadOptions(ReadOptions.TableReadOptions.newBuilder().build())
+   *           .setFormat(Storage.DataFormat.forNumber(0))
+   *           .setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
+   *           .build();
+   *   ApiFuture<Storage.ReadSession> future =
+   *       baseBigQueryStorageClient.createReadSessionCallable().futureCall(request);
+   *   // Do something.
+   *   Storage.ReadSession response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<Storage.CreateReadSessionRequest, Storage.ReadSession>
+      createReadSessionCallable() {
+    return stub.createReadSessionCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Reads rows from the table in the format prescribed by the read session. Each response contains
+   * one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to
+   * read individual rows larger than this will fail.
+   *

+   * <p>Each request also returns a set of stream statistics reflecting the estimated total number
+   * of rows in the read stream. This number is computed based on the total table size and the
+   * number of active streams in the read session, and may change as other streams continue to read
+   * data.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.ReadRowsRequest request =
+   *       Storage.ReadRowsRequest.newBuilder()
+   *           .setReadPosition(Storage.StreamPosition.newBuilder().build())
+   *           .build();
+   *   ServerStream<Storage.ReadRowsResponse> stream =
+   *       baseBigQueryStorageClient.readRowsCallable().call(request);
+   *   for (Storage.ReadRowsResponse response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }</pre>
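+   *
+   * <p>Editor's note (illustrative, not generated): the per-response statistics described above
+   * can be read off each response. The accessors below are assumptions based on the v1beta1
+   * StreamStatus message (estimated_row_count, fraction_consumed).
+   *
+   * <pre>{@code
+   * for (Storage.ReadRowsResponse response : stream) {
+   *   long estimatedRows = response.getStatus().getEstimatedRowCount();
+   *   float fractionConsumed = response.getStatus().getFractionConsumed();
+   * }
+   * }</pre>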
+   */
+  public final ServerStreamingCallable<Storage.ReadRowsRequest, Storage.ReadRowsResponse>
+      readRowsCallable() {
+    return stub.readRowsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates additional streams for a ReadSession. This API can be used to dynamically adjust the
+   * parallelism of a batch processing task upwards by adding additional workers.
+   *

+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.ReadSession session = Storage.ReadSession.newBuilder().build();
+   *   int requestedStreams = 1017221410;
+   *   Storage.BatchCreateReadSessionStreamsResponse response =
+   *       baseBigQueryStorageClient.batchCreateReadSessionStreams(session, requestedStreams);
+   * }
+   * }</pre>
+ * + * @param session Required. Must be a non-expired session obtained from a call to + * CreateReadSession. Only the name field needs to be set. + * @param requestedStreams Required. Number of new streams requested. Must be positive. Number of + * added streams may be less than this, see CreateReadSessionRequest for more information. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Storage.BatchCreateReadSessionStreamsResponse batchCreateReadSessionStreams( + Storage.ReadSession session, int requestedStreams) { + Storage.BatchCreateReadSessionStreamsRequest request = + Storage.BatchCreateReadSessionStreamsRequest.newBuilder() + .setSession(session) + .setRequestedStreams(requestedStreams) + .build(); + return batchCreateReadSessionStreams(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates additional streams for a ReadSession. This API can be used to dynamically adjust the + * parallelism of a batch processing task upwards by adding additional workers. + * + *

+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.BatchCreateReadSessionStreamsRequest request =
+   *       Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
+   *           .setSession(Storage.ReadSession.newBuilder().build())
+   *           .setRequestedStreams(1017221410)
+   *           .build();
+   *   Storage.BatchCreateReadSessionStreamsResponse response =
+   *       baseBigQueryStorageClient.batchCreateReadSessionStreams(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Storage.BatchCreateReadSessionStreamsResponse batchCreateReadSessionStreams( + Storage.BatchCreateReadSessionStreamsRequest request) { + return batchCreateReadSessionStreamsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates additional streams for a ReadSession. This API can be used to dynamically adjust the + * parallelism of a batch processing task upwards by adding additional workers. + * + *

+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.BatchCreateReadSessionStreamsRequest request =
+   *       Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
+   *           .setSession(Storage.ReadSession.newBuilder().build())
+   *           .setRequestedStreams(1017221410)
+   *           .build();
+   *   ApiFuture<Storage.BatchCreateReadSessionStreamsResponse> future =
+   *       baseBigQueryStorageClient.batchCreateReadSessionStreamsCallable().futureCall(request);
+   *   // Do something.
+   *   Storage.BatchCreateReadSessionStreamsResponse response = future.get();
+   * }
+   * }</pre>
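+   *
+   * <p>Editor's note (illustrative, not generated): a sketch of scaling a job up mid-flight under
+   * the stated semantics -- each stream returned by the call is new and is read from offset 0.
+   * Names are hypothetical.
+   *
+   * <pre>{@code
+   * Storage.BatchCreateReadSessionStreamsResponse added =
+   *     client.batchCreateReadSessionStreams(session, 2);
+   * for (Storage.Stream newStream : added.getStreamsList()) {
+   *   Storage.ReadRowsRequest req =
+   *       Storage.ReadRowsRequest.newBuilder()
+   *           .setReadPosition(Storage.StreamPosition.newBuilder().setStream(newStream).build())
+   *           .build();
+   *   // Hand client.readRowsCallable().call(req) to an additional worker.
+   * }
+   * }</pre>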
+   */
+  public final UnaryCallable<
+          Storage.BatchCreateReadSessionStreamsRequest,
+          Storage.BatchCreateReadSessionStreamsResponse>
+      batchCreateReadSessionStreamsCallable() {
+    return stub.batchCreateReadSessionStreamsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Causes a single stream in a ReadSession to gracefully stop. This API can be used to
+   * dynamically adjust the parallelism of a batch processing task downwards without losing data.
+   *

+   * <p>This API does not delete the stream -- it remains visible in the ReadSession, and any data
+   * processed by the stream is not released to other streams. However, no additional data will be
+   * assigned to the stream once this call completes. Callers must continue reading data on the
+   * stream until the end of the stream is reached so that data which has already been assigned to
+   * the stream will be processed.
+   *
+   * <p>This method will return an error if there are no other live streams in the Session, or if
+   * SplitReadStream() has been called on the given Stream.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.Stream stream = Storage.Stream.newBuilder().build();
+   *   baseBigQueryStorageClient.finalizeStream(stream);
+   * }
+   * }</pre>
+ * + * @param stream Required. Stream to finalize. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void finalizeStream(Storage.Stream stream) { + Storage.FinalizeStreamRequest request = + Storage.FinalizeStreamRequest.newBuilder().setStream(stream).build(); + finalizeStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Causes a single stream in a ReadSession to gracefully stop. This API can be used to dynamically + * adjust the parallelism of a batch processing task downwards without losing data. + * + *

+   * <p>This API does not delete the stream -- it remains visible in the ReadSession, and any data
+   * processed by the stream is not released to other streams. However, no additional data will be
+   * assigned to the stream once this call completes. Callers must continue reading data on the
+   * stream until the end of the stream is reached so that data which has already been assigned to
+   * the stream will be processed.
+   *
+   * <p>This method will return an error if there are no other live streams in the Session, or if
+   * SplitReadStream() has been called on the given Stream.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.FinalizeStreamRequest request =
+   *       Storage.FinalizeStreamRequest.newBuilder()
+   *           .setStream(Storage.Stream.newBuilder().build())
+   *           .build();
+   *   baseBigQueryStorageClient.finalizeStream(request);
+   * }
+   * }</pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void finalizeStream(Storage.FinalizeStreamRequest request) { + finalizeStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Causes a single stream in a ReadSession to gracefully stop. This API can be used to dynamically + * adjust the parallelism of a batch processing task downwards without losing data. + * + *

+   * <p>This API does not delete the stream -- it remains visible in the ReadSession, and any data
+   * processed by the stream is not released to other streams. However, no additional data will be
+   * assigned to the stream once this call completes. Callers must continue reading data on the
+   * stream until the end of the stream is reached so that data which has already been assigned to
+   * the stream will be processed.
+   *
+   * <p>This method will return an error if there are no other live streams in the Session, or if
+   * SplitReadStream() has been called on the given Stream.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.FinalizeStreamRequest request =
+   *       Storage.FinalizeStreamRequest.newBuilder()
+   *           .setStream(Storage.Stream.newBuilder().build())
+   *           .build();
+   *   ApiFuture<Empty> future =
+   *       baseBigQueryStorageClient.finalizeStreamCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<Storage.FinalizeStreamRequest, Empty> finalizeStreamCallable() {
+    return stub.finalizeStreamCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Splits a given read stream into two Streams. These streams are referred to as the primary and
+   * the residual of the split. The original stream can still be read from in the same manner as
+   * before. Both of the returned streams can also be read from, and the total rows returned by
+   * both child streams will be the same as the rows read from the original stream.
+   *

+   * <p>Moreover, the two child streams will be allocated back to back in the original Stream.
+   * Concretely, it is guaranteed that for streams Original, Primary, and Residual, that
+   * Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read
+   * to completion.
+   *
+   * <p>This method is guaranteed to be idempotent.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.Stream originalStream = Storage.Stream.newBuilder().build();
+   *   Storage.SplitReadStreamResponse response =
+   *       baseBigQueryStorageClient.splitReadStream(originalStream);
+   * }
+   * }</pre>
+   *
+   * @param originalStream Required. Stream to split.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Storage.SplitReadStreamResponse splitReadStream(Storage.Stream originalStream) {
+    Storage.SplitReadStreamRequest request =
+        Storage.SplitReadStreamRequest.newBuilder().setOriginalStream(originalStream).build();
+    return splitReadStream(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Splits a given read stream into two Streams. These streams are referred to as the primary and
+   * the residual of the split. The original stream can still be read from in the same manner as
+   * before. Both of the returned streams can also be read from, and the total rows returned by
+   * both child streams will be the same as the rows read from the original stream.
+   *

+   * <p>Moreover, the two child streams will be allocated back to back in the original Stream.
+   * Concretely, it is guaranteed that for streams Original, Primary, and Residual, that
+   * Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read
+   * to completion.
+   *
+   * <p>This method is guaranteed to be idempotent.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.SplitReadStreamRequest request =
+   *       Storage.SplitReadStreamRequest.newBuilder()
+   *           .setOriginalStream(Storage.Stream.newBuilder().build())
+   *           .setFraction(-1653751294)
+   *           .build();
+   *   Storage.SplitReadStreamResponse response = baseBigQueryStorageClient.splitReadStream(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Storage.SplitReadStreamResponse splitReadStream(
+      Storage.SplitReadStreamRequest request) {
+    return splitReadStreamCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Splits a given read stream into two Streams. These streams are referred to as the primary and
+   * the residual of the split. The original stream can still be read from in the same manner as
+   * before. Both of the returned streams can also be read from, and the total rows returned by
+   * both child streams will be the same as the rows read from the original stream.
+   *

+   * <p>Moreover, the two child streams will be allocated back to back in the original Stream.
+   * Concretely, it is guaranteed that for streams Original, Primary, and Residual, that
+   * Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read
+   * to completion.
+   *
+   * <p>This method is guaranteed to be idempotent.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+   *   Storage.SplitReadStreamRequest request =
+   *       Storage.SplitReadStreamRequest.newBuilder()
+   *           .setOriginalStream(Storage.Stream.newBuilder().build())
+   *           .setFraction(-1653751294)
+   *           .build();
+   *   ApiFuture<Storage.SplitReadStreamResponse> future =
+   *       baseBigQueryStorageClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something.
+   *   Storage.SplitReadStreamResponse response = future.get();
+   * }
+   * }</pre>
+ */ + public final UnaryCallable + splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java new file mode 100644 index 000000000000..495c661835f0 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java @@ -0,0 +1,258 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta1; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta1.stub.BigQueryStorageStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BaseBigQueryStorageClient}. + * + *

+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createReadSession:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryStorageSettings.Builder baseBigQueryStorageSettingsBuilder =
+ *     BaseBigQueryStorageSettings.newBuilder();
+ * baseBigQueryStorageSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryStorageSettingsBuilder
+ *             .createReadSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BaseBigQueryStorageSettings baseBigQueryStorageSettings =
+ *     baseBigQueryStorageSettingsBuilder.build();
+ * }</pre>
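+ *
+ * <p>Editor's note (illustrative, not generated): readRows is a server-streaming method, so it is
+ * tuned through ServerStreamingCallSettings rather than the unary pattern shown above. A minimal
+ * sketch, reusing the same builder (StatusCode is com.google.api.gax.rpc.StatusCode):
+ *
+ * <pre>{@code
+ * baseBigQueryStorageSettingsBuilder
+ *     .readRowsSettings()
+ *     .setRetryableCodes(StatusCode.Code.UNAVAILABLE)
+ *     .setRetrySettings(
+ *         baseBigQueryStorageSettingsBuilder
+ *             .readRowsSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setMaxAttempts(10)
+ *             .build());
+ * }</pre>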
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class BaseBigQueryStorageSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings + createReadSessionSettings() { + return ((BigQueryStorageStubSettings) getStubSettings()).createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings + readRowsSettings() { + return ((BigQueryStorageStubSettings) getStubSettings()).readRowsSettings(); + } + + /** Returns the object with the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return ((BigQueryStorageStubSettings) getStubSettings()) + .batchCreateReadSessionStreamsSettings(); + } + + /** Returns the object with the settings used for calls to finalizeStream. */ + public UnaryCallSettings finalizeStreamSettings() { + return ((BigQueryStorageStubSettings) getStubSettings()).finalizeStreamSettings(); + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return ((BigQueryStorageStubSettings) getStubSettings()).splitReadStreamSettings(); + } + + public static final BaseBigQueryStorageSettings create(BigQueryStorageStubSettings stub) + throws IOException { + return new BaseBigQueryStorageSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryStorageStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryStorageStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryStorageStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryStorageStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryStorageStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryStorageStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryStorageStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected BaseBigQueryStorageSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BaseBigQueryStorageSettings. */ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryStorageStubSettings.newBuilder(clientContext)); + } + + protected Builder(BaseBigQueryStorageSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryStorageStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BigQueryStorageStubSettings.newBuilder()); + } + + public BigQueryStorageStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryStorageStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return getStubSettingsBuilder().createReadSessionSettings(); + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return getStubSettingsBuilder().readRowsSettings(); + } + + /** Returns the builder for the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings.Builder< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return getStubSettingsBuilder().batchCreateReadSessionStreamsSettings(); + } + + /** Returns the builder for the settings used for calls to finalizeStream. */ + public UnaryCallSettings.Builder + finalizeStreamSettings() { + return getStubSettingsBuilder().finalizeStreamSettings(); + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder< + Storage.SplitReadStreamRequest, Storage.SplitReadStreamResponse> + splitReadStreamSettings() { + return getStubSettingsBuilder().splitReadStreamSettings(); + } + + @Override + public BaseBigQueryStorageSettings build() throws IOException { + return new BaseBigQueryStorageSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClient.java new file mode 100644 index 000000000000..ca1a87427c1a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClient.java @@ -0,0 +1,616 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.Stream; +import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference; +import com.google.cloud.bigquery.storage.v1beta1.stub.EnhancedBigQueryStorageStub; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * Service Description: BigQuery storage API. + * + *

+ * <p>The BigQuery storage API can be used to read data stored in BigQuery.
+ *
+ * <p>This class provides the ability to make remote calls to the backing service through method
+ * calls that map to API methods. Sample code to get started:
+ *
+ * <pre><code>
+ * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+ *   TableReference tableReference = TableReference.newBuilder().build();
+ *   String parent = "";
+ *   int requestedStreams = 0;
+ *   ReadSession response = bigQueryStorageClient.createReadSession(tableReference, parent, requestedStreams);
+ * }
+ * </code></pre>
+ *

+ * <p>Note: close() needs to be called on the bigQueryStorageClient object to clean up resources
+ * such as threads. In the example above, try-with-resources is used, which automatically calls
+ * close().
+ *
+ * <p>The surface of this class includes several types of Java methods for each of the API's
+ * methods:
+ *
+ * <ol>
+ *   <li>A "flattened" method. With this type of method, the fields of the request type have been
+ *       converted into function parameters. It may be the case that not all fields are available
+ *       as parameters, and not every API method will have a flattened method entry point.
+ *   <li>A "request object" method. This type of method only takes one parameter, a request
+ *       object, which must be constructed before the call. Not every API method will have a
+ *       request object method.
+ *   <li>A "callable" method. This type of method takes no parameters and returns an immutable API
+ *       callable object, which can be used to initiate calls to the service.
+ * </ol>
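+ *
+ * Editor's note (illustrative, not generated): the three variants applied to createReadSession;
+ * the variable names are hypothetical.
+ *
+ * <pre><code>
+ * // 1. Flattened: request fields become parameters.
+ * ReadSession s1 = client.createReadSession(tableReference, parent, requestedStreams);
+ * // 2. Request object: build the request explicitly.
+ * CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder()
+ *     .setTableReference(tableReference)
+ *     .setParent(parent)
+ *     .build();
+ * ReadSession s2 = client.createReadSession(request);
+ * // 3. Callable: an immutable object that also supports asynchronous futureCall.
+ * ApiFuture<ReadSession> future = client.createReadSessionCallable().futureCall(request);
+ * ReadSession s3 = future.get();
+ * </code></pre>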

+ *
+ * <p>See the individual methods for example code.
+ *
+ * <p>Many parameters require resource names to be formatted in a particular way. To assist with
+ * these names, this class includes a format method for each type of name, and additionally a parse
+ * method to extract the individual identifiers contained within names that are returned.
+ *
+ * <p>This class can be customized by passing in a custom instance of BigQueryStorageSettings to
+ * create(). For example:
+ *
+ * <p>To customize credentials:
+ *
+ * <pre><code>
+ * BigQueryStorageSettings bigQueryStorageSettings =
+ *     BigQueryStorageSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryStorageClient bigQueryStorageClient =
+ *     BigQueryStorageClient.create(bigQueryStorageSettings);
+ * </code></pre>
+ *
+ * To customize the endpoint:
+ *
+ * <pre><code>
+ * BigQueryStorageSettings bigQueryStorageSettings =
+ *     BigQueryStorageSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryStorageClient bigQueryStorageClient =
+ *     BigQueryStorageClient.create(bigQueryStorageSettings);
+ * 
+ * </code></pre>
+ */ +@BetaApi +public class BigQueryStorageClient implements BackgroundResource { + + private final BigQueryStorageSettings settings; + private final EnhancedBigQueryStorageStub stub; + + /** Constructs an instance of {@link BigQueryStorageClient} with default settings. */ + public static final BigQueryStorageClient create() throws IOException { + return create(BigQueryStorageSettings.newBuilder().build()); + } + + /** + * Constructs an instance of {@link BigQueryStorageClient}, using the given settings. The channels + * are created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BigQueryStorageClient create(BigQueryStorageSettings settings) + throws IOException { + return new BigQueryStorageClient(settings); + } + + /** + * Constructs an instance of {@link BigQueryStorageClient}, using the given stub for making calls. + * This is for advanced usage - prefer to use BigQueryStorageSettings}. + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final BigQueryStorageClient create(EnhancedBigQueryStorageStub stub) { + return new BigQueryStorageClient(stub); + } + + /** + * Constructs an instance of {@link BigQueryStorageClient}, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected BigQueryStorageClient(BigQueryStorageSettings settings) throws IOException { + this.settings = settings; + this.stub = + EnhancedBigQueryStorageStub.create( + settings.getTypedStubSettings(), settings.getReadRowsRetryAttemptListener()); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected BigQueryStorageClient(EnhancedBigQueryStorageStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BigQueryStorageSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public EnhancedBigQueryStorageStub getStub() { + return stub; + } + + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 24 hours after they are created and do not require
+   * manual clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   TableReference tableReference = TableReference.newBuilder().build();
+   *   String parent = "";
+   *   int requestedStreams = 0;
+   *   ReadSession response = bigQueryStorageClient.createReadSession(tableReference, parent, requestedStreams);
+   * }
+   * </code></pre>
+   *
+   * @param tableReference Required. Reference to the table to read.
+   * @param parent Required. String of the form "projects/your-project-id" indicating the project
+   *     this ReadSession is associated with. This is the project that will be billed for usage.
+   * @param requestedStreams Optional. Initial number of streams. If unset or 0, we will provide a
+   *     value of streams so as to produce reasonable throughput. Must be non-negative. The number
+   *     of streams may be lower than the requested number, depending on the amount of parallelism
+   *     that is reasonable for the table and the maximum amount of parallelism allowed by the
+   *     system.
+   *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + TableReference tableReference, String parent, int requestedStreams) { + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setTableReference(tableReference) + .setParent(parent) + .setRequestedStreams(requestedStreams) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 24 hours after they are created and do not require
+   * manual clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   TableReference tableReference = TableReference.newBuilder().build();
+   *   String parent = "";
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder()
+   *     .setTableReference(tableReference)
+   *     .setParent(parent)
+   *     .build();
+   *   ReadSession response = bigQueryStorageClient.createReadSession(request);
+   * }
+   * </code></pre>
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession(CreateReadSessionRequest request) { + return createReadSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

+   * <p>A particular row can be read by at most one stream. When the caller has reached the end of
+   * each stream in the session, then all the data in the table has been read.
+   *
+   * <p>Read sessions automatically expire 24 hours after they are created and do not require
+   * manual clean-up by the caller.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   TableReference tableReference = TableReference.newBuilder().build();
+   *   String parent = "";
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder()
+   *     .setTableReference(tableReference)
+   *     .setParent(parent)
+   *     .build();
+   *   ApiFuture<ReadSession> future = bigQueryStorageClient.createReadSessionCallable().futureCall(request);
+   *   // Do something
+   *   ReadSession response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<CreateReadSessionRequest, ReadSession> createReadSessionCallable() {
+    return stub.createReadSessionCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Reads rows from the table in the format prescribed by the read session. Each response contains
+   * one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to
+   * read individual rows larger than this will fail.
+   *

+   * <p>Each request also returns a set of stream statistics reflecting the estimated total number
+   * of rows in the read stream. This number is computed based on the total table size and the
+   * number of active streams in the read session, and may change as other streams continue to read
+   * data.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   StreamPosition readPosition = StreamPosition.newBuilder().build();
+   *   ReadRowsRequest request = ReadRowsRequest.newBuilder()
+   *     .setReadPosition(readPosition)
+   *     .build();
+   *
+   *   ServerStream<ReadRowsResponse> stream = bigQueryStorageClient.readRowsCallable().call(request);
+   *   for (ReadRowsResponse response : stream) {
+   *     // Do something when a response is received
+   *   }
+   * }
+   * </code></pre>
+   */
+  public final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> readRowsCallable() {
+    return stub.readRowsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates additional streams for a ReadSession. This API can be used to dynamically adjust the
+   * parallelism of a batch processing task upwards by adding additional workers.
+   *

+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   ReadSession session = ReadSession.newBuilder().build();
+   *   int requestedStreams = 0;
+   *   BatchCreateReadSessionStreamsResponse response = bigQueryStorageClient.batchCreateReadSessionStreams(session, requestedStreams);
+   * }
+   * </code></pre>
+ * + * @param session Required. Must be a non-expired session obtained from a call to + * CreateReadSession. Only the name field needs to be set. + * @param requestedStreams Required. Number of new streams requested. Must be positive. Number of + * added streams may be less than this, see CreateReadSessionRequest for more information. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreateReadSessionStreamsResponse batchCreateReadSessionStreams( + ReadSession session, int requestedStreams) { + + BatchCreateReadSessionStreamsRequest request = + BatchCreateReadSessionStreamsRequest.newBuilder() + .setSession(session) + .setRequestedStreams(requestedStreams) + .build(); + return batchCreateReadSessionStreams(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates additional streams for a ReadSession. This API can be used to dynamically adjust the + * parallelism of a batch processing task upwards by adding additional workers. + * + *

Sample code: + * + *


+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   ReadSession session = ReadSession.newBuilder().build();
+   *   int requestedStreams = 0;
+   *   BatchCreateReadSessionStreamsRequest request = BatchCreateReadSessionStreamsRequest.newBuilder()
+   *     .setSession(session)
+   *     .setRequestedStreams(requestedStreams)
+   *     .build();
+   *   BatchCreateReadSessionStreamsResponse response = bigQueryStorageClient.batchCreateReadSessionStreams(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final BatchCreateReadSessionStreamsResponse batchCreateReadSessionStreams(
+      BatchCreateReadSessionStreamsRequest request) {
+    return batchCreateReadSessionStreamsCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates additional streams for a ReadSession. This API can be used to dynamically adjust the
+   * parallelism of a batch processing task upwards by adding additional workers.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   ReadSession session = ReadSession.newBuilder().build();
+   *   int requestedStreams = 0;
+   *   BatchCreateReadSessionStreamsRequest request = BatchCreateReadSessionStreamsRequest.newBuilder()
+   *     .setSession(session)
+   *     .setRequestedStreams(requestedStreams)
+   *     .build();
+   *   ApiFuture<BatchCreateReadSessionStreamsResponse> future = bigQueryStorageClient.batchCreateReadSessionStreamsCallable().futureCall(request);
+   *   // Do something
+   *   BatchCreateReadSessionStreamsResponse response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<
+          BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse>
+      batchCreateReadSessionStreamsCallable() {
+    return stub.batchCreateReadSessionStreamsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Triggers the graceful termination of a single stream in a ReadSession. This API can be used to
+   * dynamically adjust the parallelism of a batch processing task downwards without losing data.
+   *
+   * <p>This API does not delete the stream -- it remains visible in the ReadSession, and any data
+   * processed by the stream is not released to other streams. However, no additional data will be
+   * assigned to the stream once this call completes. Callers must continue reading data on the
+   * stream until the end of the stream is reached so that data which has already been assigned to
+   * the stream will be processed.
+   *
+   * <p>This method will return an error if there are no other live streams in the Session, or if
+   * SplitReadStream() has been called on the given Stream.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   Stream stream = Stream.newBuilder().build();
+   *   bigQueryStorageClient.finalizeStream(stream);
+   * }
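+   *
+   * // Sketch: finalizeStream() only stops new data from being assigned. A caller should keep
+   * // draining its existing ReadRows stream until it ends, e.g.
+   * //   for (ReadRowsResponse response : serverStream) { ... }  // process remaining rows
+   * // (serverStream here is a hypothetical handle obtained from readRowsCallable().call(...)).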
+   * </code></pre>
+   *
+   * @param stream Stream to finalize.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void finalizeStream(Stream stream) {
+    FinalizeStreamRequest request = FinalizeStreamRequest.newBuilder().setStream(stream).build();
+    finalizeStream(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Triggers the graceful termination of a single stream in a ReadSession. This API can be used to
+   * dynamically adjust the parallelism of a batch processing task downwards without losing data.
+   *
+   * <p>This API does not delete the stream -- it remains visible in the ReadSession, and any data
+   * processed by the stream is not released to other streams. However, no additional data will be
+   * assigned to the stream once this call completes. Callers must continue reading data on the
+   * stream until the end of the stream is reached so that data which has already been assigned to
+   * the stream will be processed.
+   *
+   * <p>This method will return an error if there are no other live streams in the Session, or if
+   * SplitReadStream() has been called on the given Stream.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   Stream stream = Stream.newBuilder().build();
+   *   FinalizeStreamRequest request = FinalizeStreamRequest.newBuilder()
+   *     .setStream(stream)
+   *     .build();
+   *   bigQueryStorageClient.finalizeStream(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void finalizeStream(FinalizeStreamRequest request) {
+    finalizeStreamCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Triggers the graceful termination of a single stream in a ReadSession. This API can be used to
+   * dynamically adjust the parallelism of a batch processing task downwards without losing data.
+   *
+   * <p>This API does not delete the stream -- it remains visible in the ReadSession, and any data
+   * processed by the stream is not released to other streams. However, no additional data will be
+   * assigned to the stream once this call completes. Callers must continue reading data on the
+   * stream until the end of the stream is reached so that data which has already been assigned to
+   * the stream will be processed.
+   *
+   * <p>This method will return an error if there are no other live streams in the Session, or if
+   * SplitReadStream() has been called on the given Stream.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   Stream stream = Stream.newBuilder().build();
+   *   FinalizeStreamRequest request = FinalizeStreamRequest.newBuilder()
+   *     .setStream(stream)
+   *     .build();
+   *   ApiFuture<Void> future = bigQueryStorageClient.finalizeStreamCallable().futureCall(request);
+   *   // Do something
+   *   future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<FinalizeStreamRequest, Empty> finalizeStreamCallable() {
+    return stub.finalizeStreamCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Splits a given read stream into two Streams. These streams are referred to as the primary and
+   * the residual of the split. The original stream can still be read from in the same manner as
+   * before. Both of the returned streams can also be read from, and the total rows returned by
+   * both child streams will be the same as the rows read from the original stream.
+   *
+   * <p>Moreover, the two child streams will be allocated back to back in the original Stream.
+   * Concretely, it is guaranteed that for streams Original, Primary, and Residual,
+   * Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been
+   * read to completion.
+   *
+   * <p>This method is guaranteed to be idempotent.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   Stream originalStream = Stream.newBuilder().build();
+   *   SplitReadStreamResponse response = bigQueryStorageClient.splitReadStream(originalStream);
+   * }
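+   *
+   * // Sketch: the two child streams are exposed on the response; in v1beta1 these are expected
+   * // to be
+   * //   Stream primary = response.getPrimaryStream();
+   * //   Stream residual = response.getRemainderStream();
+   * // (getter names assumed from the SplitReadStreamResponse proto; verify before use).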
+   * </code></pre>
+   *
+   * @param originalStream Stream to split.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final SplitReadStreamResponse splitReadStream(Stream originalStream) {
+    SplitReadStreamRequest request =
+        SplitReadStreamRequest.newBuilder().setOriginalStream(originalStream).build();
+    return splitReadStream(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Splits a given read stream into two Streams. These streams are referred to as the primary and
+   * the residual of the split. The original stream can still be read from in the same manner as
+   * before. Both of the returned streams can also be read from, and the total rows returned by
+   * both child streams will be the same as the rows read from the original stream.
+   *
+   * <p>Moreover, the two child streams will be allocated back to back in the original Stream.
+   * Concretely, it is guaranteed that for streams Original, Primary, and Residual,
+   * Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been
+   * read to completion.
+   *
+   * <p>This method is guaranteed to be idempotent.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   Stream originalStream = Stream.newBuilder().build();
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder()
+   *     .setOriginalStream(originalStream)
+   *     .build();
+   *   SplitReadStreamResponse response = bigQueryStorageClient.splitReadStream(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final SplitReadStreamResponse splitReadStream(SplitReadStreamRequest request) {
+    return splitReadStreamCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Splits a given read stream into two Streams. These streams are referred to as the primary and
+   * the residual of the split. The original stream can still be read from in the same manner as
+   * before. Both of the returned streams can also be read from, and the total rows returned by
+   * both child streams will be the same as the rows read from the original stream.
+   *
+   * <p>Moreover, the two child streams will be allocated back to back in the original Stream.
+   * Concretely, it is guaranteed that for streams Original, Primary, and Residual,
+   * Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been
+   * read to completion.
+   *
+   * <p>This method is guaranteed to be idempotent.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (BigQueryStorageClient bigQueryStorageClient = BigQueryStorageClient.create()) {
+   *   Stream originalStream = Stream.newBuilder().build();
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder()
+   *     .setOriginalStream(originalStream)
+   *     .build();
+   *   ApiFuture<SplitReadStreamResponse> future = bigQueryStorageClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something
+   *   SplitReadStreamResponse response = future.get();
+   * }
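+   *
+   * // Note: future.get() blocks and wraps any ApiException in
+   * // java.util.concurrent.ExecutionException, which callers should catch and unwrap.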
+   * </code></pre>
+ */ + public final UnaryCallable + splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageSettings.java new file mode 100644 index 000000000000..58c090ab17d0 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageSettings.java @@ -0,0 +1,267 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta1; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta1.stub.EnhancedBigQueryStorageStubSettings; +import com.google.protobuf.Empty; +import io.grpc.Metadata; +import io.grpc.Status; +import java.io.IOException; +import java.util.List; + +/** + * Settings class to configure an instance of {@link BigQueryStorageClient}. + * + *

+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of createReadSession to 30 seconds:
+ *
+ * <pre>
+ * <code>
+ * BigQueryStorageSettings.Builder settingsBuilder = BigQueryStorageSettings.newBuilder();
+ * settingsBuilder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * BigQueryStorageSettings settings = settingsBuilder.build();
+ * </code>
+ * </pre>
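+ *
+ * <p>A ReadRows retry listener can also be attached through the builder (a minimal sketch; the
+ * setReadRowsRetryAttemptListener method is defined on this class's Builder below):
+ *
+ * <pre>
+ * <code>
+ * settingsBuilder.setReadRowsRetryAttemptListener(
+ *     (prevStatus, prevMetadata) -> System.out.println("ReadRows retry after: " + prevStatus));
+ * </code>
+ * </pre>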
+ */ +@BetaApi +public class BigQueryStorageSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return getTypedStubSettings().createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return getTypedStubSettings().readRowsSettings(); + } + + public static interface RetryAttemptListener { + public void onRetryAttempt(Status prevStatus, Metadata prevMetadata); + } + + private RetryAttemptListener readRowsRetryAttemptListener = null; + + /** + * If a non null readRowsRetryAttemptListener is provided, client will call onRetryAttempt + * function before a failed ReadRows request is retried. This can be used as negative feedback + * mechanism for future decision to split read streams because some retried failures are due to + * resource exhaustion that increased parallelism only makes it worse. + */ + public void setReadRowsRetryAttemptListener(RetryAttemptListener readRowsRetryAttemptListener) { + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + } + + public RetryAttemptListener getReadRowsRetryAttemptListener() { + return readRowsRetryAttemptListener; + } + + /** Returns the object with the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return getTypedStubSettings().batchCreateReadSessionStreamsSettings(); + } + + /** Returns the object with the settings used for calls to finalizeStream. */ + public UnaryCallSettings finalizeStreamSettings() { + return getTypedStubSettings().finalizeStreamSettings(); + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return getTypedStubSettings().splitReadStreamSettings(); + } + + EnhancedBigQueryStorageStubSettings getTypedStubSettings() { + return (EnhancedBigQueryStorageStubSettings) getStubSettings(); + } + + public static final BigQueryStorageSettings create(EnhancedBigQueryStorageStubSettings settings) + throws IOException { + return new BigQueryStorageSettings.Builder(settings.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return EnhancedBigQueryStorageStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return EnhancedBigQueryStorageStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return EnhancedBigQueryStorageStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return EnhancedBigQueryStorageStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return EnhancedBigQueryStorageStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return EnhancedBigQueryStorageStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return EnhancedBigQueryStorageStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryStorageSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BigQueryStorageSettings. */ + public static class Builder extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(EnhancedBigQueryStorageStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(EnhancedBigQueryStorageStubSettings.newBuilder()); + } + + protected Builder(BigQueryStorageSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(EnhancedBigQueryStorageStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public EnhancedBigQueryStorageStubSettings.Builder getStubSettingsBuilder() { + return ((EnhancedBigQueryStorageStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + private RetryAttemptListener readRowsRetryAttemptListener = null; + + public Builder setReadRowsRetryAttemptListener( + RetryAttemptListener readRowsRetryAttemptListener) { + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + return this; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return getStubSettingsBuilder().createReadSessionSettings(); + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return getStubSettingsBuilder().readRowsSettings(); + } + + /** Returns the builder for the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings.Builder< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return getStubSettingsBuilder().batchCreateReadSessionStreamsSettings(); + } + + /** Returns the builder for the settings used for calls to finalizeStream. */ + public UnaryCallSettings.Builder finalizeStreamSettings() { + return getStubSettingsBuilder().finalizeStreamSettings(); + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return getStubSettingsBuilder().splitReadStreamSettings(); + } + + @Override + public BigQueryStorageSettings build() throws IOException { + BigQueryStorageSettings settings = new BigQueryStorageSettings(this); + settings.setReadRowsRetryAttemptListener(readRowsRetryAttemptListener); + return settings; + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/gapic_metadata.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/gapic_metadata.json new file mode 100644 index 000000000000..461683abc1fe --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/gapic_metadata.json @@ -0,0 +1,33 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.cloud.bigquery.storage.v1beta1", + "libraryPackage": "com.google.cloud.bigquery.storage.v1beta1", + "services": { + "BigQueryStorage": { + "clients": { + "grpc": { + "libraryClient": "BaseBigQueryStorageClient", + "rpcs": { + "BatchCreateReadSessionStreams": { + "methods": ["batchCreateReadSessionStreams", "batchCreateReadSessionStreams", "batchCreateReadSessionStreamsCallable"] + }, + "CreateReadSession": { + "methods": ["createReadSession", "createReadSession", "createReadSession", "createReadSessionCallable"] + }, + "FinalizeStream": { + "methods": ["finalizeStream", "finalizeStream", "finalizeStreamCallable"] + }, + "ReadRows": { + "methods": ["readRowsCallable"] + }, + "SplitReadStream": { + "methods": ["splitReadStream", "splitReadStream", "splitReadStreamCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git 
a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java new file mode 100644 index 000000000000..ea7daa69dc76 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigQuery Storage API + * + *

+ * <p>The interfaces provided are listed below, along with usage samples.
+ *
+ * <p>======================= BaseBigQueryStorageClient =======================
+ *
+ * <p>Service Description: BigQuery storage API.
+ *
+ * <p>The BigQuery storage API can be used to read data stored in BigQuery.
+ *
+ * <p>The v1beta1 API is not yet officially deprecated, and will go through a full deprecation
+ * cycle (https://cloud.google.com/products#product-launch-stages) before the service is turned
+ * down. However, new code should use the v1 API going forward.
+ *
+ * <p>Sample for BaseBigQueryStorageClient:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
+ *   TableReferenceProto.TableReference tableReference =
+ *       TableReferenceProto.TableReference.newBuilder().build();
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   int requestedStreams = 1017221410;
+ *   Storage.ReadSession response =
+ *       baseBigQueryStorageClient.createReadSession(tableReference, parent, requestedStreams);
+ * }
+ * }</pre>
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.bigquery.storage.v1beta1; + +import javax.annotation.Generated; diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java new file mode 100644 index 000000000000..175c5be35205 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java @@ -0,0 +1,66 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta1.Storage; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the BigQueryStorage service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public abstract class BigQueryStorageStub implements BackgroundResource { + + public UnaryCallable + createReadSessionCallable() { + throw new UnsupportedOperationException("Not implemented: createReadSessionCallable()"); + } + + public ServerStreamingCallable + readRowsCallable() { + throw new UnsupportedOperationException("Not implemented: readRowsCallable()"); + } + + public UnaryCallable< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsCallable() { + throw new UnsupportedOperationException( + "Not implemented: batchCreateReadSessionStreamsCallable()"); + } + + public UnaryCallable finalizeStreamCallable() { + throw new UnsupportedOperationException("Not implemented: finalizeStreamCallable()"); + } + + public UnaryCallable + splitReadStreamCallable() { + throw new UnsupportedOperationException("Not implemented: splitReadStreamCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java new file mode 100644 index 000000000000..420390330702 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java @@ -0,0 +1,455 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta1.Storage; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryStorageStub}. + * + *

+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createReadSession:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryStorageStubSettings.Builder baseBigQueryStorageSettingsBuilder =
+ *     BigQueryStorageStubSettings.newBuilder();
+ * baseBigQueryStorageSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryStorageSettingsBuilder
+ *             .createReadSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryStorageStubSettings baseBigQueryStorageSettings =
+ *     baseBigQueryStorageSettingsBuilder.build();
+ * }</pre>
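+ *
+ * <p>Note that the defaults below give ReadRows (retry_policy_1_params) a 24-hour timeout budget
+ * (86,400,000 ms) versus 10 minutes (600,000 ms) for the unary methods, since a single ReadRows
+ * stream can legitimately run for a very long time.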
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class BigQueryStorageStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings + createReadSessionSettings; + private final ServerStreamingCallSettings + readRowsSettings; + private final UnaryCallSettings< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings; + private final UnaryCallSettings finalizeStreamSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return batchCreateReadSessionStreamsSettings; + } + + /** Returns the object with the settings used for calls to finalizeStream. */ + public UnaryCallSettings finalizeStreamSettings() { + return finalizeStreamSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + public BigQueryStorageStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryStorageStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. 
*/ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryStorageStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryStorageStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + batchCreateReadSessionStreamsSettings = + settingsBuilder.batchCreateReadSessionStreamsSettings().build(); + finalizeStreamSettings = settingsBuilder.finalizeStreamSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for BigQueryStorageStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder< + Storage.ReadRowsRequest, Storage.ReadRowsResponse> + readRowsSettings; + private final UnaryCallSettings.Builder< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings; + private final UnaryCallSettings.Builder + finalizeStreamSettings; + private final UnaryCallSettings.Builder< + Storage.SplitReadStreamRequest, Storage.SplitReadStreamResponse> + splitReadStreamSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_1_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_2_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build(); + definitions.put("retry_policy_1_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_2_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createReadSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + readRowsSettings = ServerStreamingCallSettings.newBuilder(); + batchCreateReadSessionStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + finalizeStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + splitReadStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, + 
batchCreateReadSessionStreamsSettings, + finalizeStreamSettings, + splitReadStreamSettings); + initDefaults(this); + } + + protected Builder(BigQueryStorageStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + batchCreateReadSessionStreamsSettings = + settings.batchCreateReadSessionStreamsSettings.toBuilder(); + finalizeStreamSettings = settings.finalizeStreamSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, + batchCreateReadSessionStreamsSettings, + finalizeStreamSettings, + splitReadStreamSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createReadSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .readRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .batchCreateReadSessionStreamsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .finalizeStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .splitReadStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings.Builder< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return batchCreateReadSessionStreamsSettings; + } + + /** Returns the builder for the settings used for calls to finalizeStream. */ + public UnaryCallSettings.Builder + finalizeStreamSettings() { + return finalizeStreamSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder< + Storage.SplitReadStreamRequest, Storage.SplitReadStreamResponse> + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public BigQueryStorageStubSettings build() throws IOException { + return new BigQueryStorageStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStub.java new file mode 100644 index 000000000000..b6fc8868e613 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStub.java @@ -0,0 +1,227 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcRawCallableFactory; +import com.google.api.gax.retrying.ExponentialRetryAlgorithm; +import com.google.api.gax.retrying.ScheduledRetryingExecutor; +import com.google.api.gax.retrying.StreamingRetryAlgorithm; +import com.google.api.gax.rpc.Callables; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsExtractor; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.gax.tracing.SpanName; +import com.google.api.gax.tracing.TracedServerStreamingCallable; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageGrpc; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageSettings; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta1.stub.readrows.ApiResultRetryAlgorithm; +import com.google.cloud.bigquery.storage.v1beta1.stub.readrows.ReadRowsRetryingCallable; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Enhanced stub class for BigQuery Storage API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +public class EnhancedBigQueryStorageStub implements BackgroundResource { + + private static final String TRACING_OUTER_CLIENT_NAME = "BigQueryStorage"; + private final GrpcBigQueryStorageStub stub; + private final BigQueryStorageStubSettings stubSettings; + private final BigQueryStorageSettings.RetryAttemptListener readRowsRetryAttemptListener; + private final ClientContext context; + + public static EnhancedBigQueryStorageStub create(EnhancedBigQueryStorageStubSettings settings) + throws IOException { + return create(settings, null); + } + + public static EnhancedBigQueryStorageStub create( + EnhancedBigQueryStorageStubSettings settings, + BigQueryStorageSettings.RetryAttemptListener readRowsRetryAttemptListener) + throws IOException { + // Configure the base settings. + BigQueryStorageStubSettings.Builder baseSettingsBuilder = + BigQueryStorageStubSettings.newBuilder() + .setUniverseDomain(settings.getUniverseDomain()) + .setTransportChannelProvider(settings.getTransportChannelProvider()) + .setHeaderProvider(settings.getHeaderProvider()) + .setCredentialsProvider(settings.getCredentialsProvider()) + .setStreamWatchdogCheckInterval(settings.getStreamWatchdogCheckInterval()) + .setStreamWatchdogProvider(settings.getStreamWatchdogProvider()); + + // CreateReadSession is a simple pass-through. + baseSettingsBuilder + .createReadSessionSettings() + .setRetryableCodes(settings.createReadSessionSettings().getRetryableCodes()) + .setRetrySettings(settings.createReadSessionSettings().getRetrySettings()); + + // ReadRows is a simple pass-through. + baseSettingsBuilder + .readRowsSettings() + .setRetryableCodes(settings.readRowsSettings().getRetryableCodes()) + .setRetrySettings(settings.readRowsSettings().getRetrySettings()) + .setResumptionStrategy(settings.readRowsSettings().getResumptionStrategy()) + .setIdleTimeout(settings.readRowsSettings().getIdleTimeout()); + + // BatchCreateReadSessionStreams is a simple pass-through. + baseSettingsBuilder + .batchCreateReadSessionStreamsSettings() + .setRetryableCodes(settings.batchCreateReadSessionStreamsSettings().getRetryableCodes()) + .setRetrySettings(settings.batchCreateReadSessionStreamsSettings().getRetrySettings()); + + // FinalizeStream is a simple pass-through. + baseSettingsBuilder + .finalizeStreamSettings() + .setRetryableCodes(settings.finalizeStreamSettings().getRetryableCodes()) + .setRetrySettings(settings.finalizeStreamSettings().getRetrySettings()); + + // SplitReadStream is a simple pass-through. 
+ baseSettingsBuilder + .splitReadStreamSettings() + .setRetryableCodes(settings.splitReadStreamSettings().getRetryableCodes()) + .setRetrySettings(settings.splitReadStreamSettings().getRetrySettings()); + + BigQueryStorageStubSettings baseSettings = baseSettingsBuilder.build(); + ClientContext clientContext = ClientContext.create(baseSettings); + GrpcBigQueryStorageStub stub = new GrpcBigQueryStorageStub(baseSettings, clientContext); + return new EnhancedBigQueryStorageStub( + stub, baseSettings, readRowsRetryAttemptListener, clientContext); + } + + @InternalApi("Visible for testing") + EnhancedBigQueryStorageStub( + GrpcBigQueryStorageStub stub, + BigQueryStorageStubSettings stubSettings, + BigQueryStorageSettings.RetryAttemptListener readRowsRetryAttemptListener, + ClientContext context) { + this.stub = stub; + this.stubSettings = stubSettings; + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + this.context = context; + } + + public UnaryCallable createReadSessionCallable() { + return stub.createReadSessionCallable(); + } + + public ServerStreamingCallable readRowsCallable() { + ServerStreamingCallable innerCallable = + GrpcRawCallableFactory.createServerStreamingCallable( + GrpcCallSettings.newBuilder() + .setMethodDescriptor(BigQueryStorageGrpc.getReadRowsMethod()) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(ReadRowsRequest request) { + return ImmutableMap.of( + "read_position.stream.name", + String.valueOf(request.getReadPosition().getStream().getName())); + } + }) + .build(), + stubSettings.readRowsSettings().getRetryableCodes()); + ServerStreamingCallSettings callSettings = + stubSettings.readRowsSettings(); + + StreamingRetryAlgorithm retryAlgorithm = + new StreamingRetryAlgorithm<>( + new ApiResultRetryAlgorithm(readRowsRetryAttemptListener), + new ExponentialRetryAlgorithm(callSettings.getRetrySettings(), context.getClock())); + + ScheduledRetryingExecutor retryingExecutor = + new ScheduledRetryingExecutor<>(retryAlgorithm, context.getExecutor()); + + if (context.getStreamWatchdog() != null) { + innerCallable = Callables.watched(innerCallable, callSettings, context); + } + + ReadRowsRetryingCallable outerCallable = + new ReadRowsRetryingCallable( + context.getDefaultCallContext(), + innerCallable, + retryingExecutor, + callSettings.getResumptionStrategy()); + + ServerStreamingCallable traced = + new TracedServerStreamingCallable<>( + outerCallable, + context.getTracerFactory(), + SpanName.of(TRACING_OUTER_CLIENT_NAME, "ReadRows")); + return traced.withDefaultCallContext(context.getDefaultCallContext()); + } + + public UnaryCallable + batchCreateReadSessionStreamsCallable() { + return stub.batchCreateReadSessionStreamsCallable(); + } + + public UnaryCallable finalizeStreamCallable() { + return stub.finalizeStreamCallable(); + } + + public UnaryCallable splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git 
a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettings.java new file mode 100644 index 000000000000..f603d046b00b --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettings.java @@ -0,0 +1,289 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta1.stub.readrows.ReadRowsResumptionStrategy; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Empty; +import java.util.List; + +/** + * Settings class to configure an instance of {@link EnhancedBigQueryStorageStub}. + * + *

The default instance dynamically reads and applies the default values used by {@link + * BigQueryStorageStub}. + * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. For + * example, to set the total timeout of createReadSession to 30 seconds: + * + *

+ * 
+ * EnhancedBigQueryStorageStubSettings.Builder builder =
+ *     EnhancedBigQueryStorageStubSettings.newBuilder();
+ * builder.createReadSessionSettings().setRetrySettings(
+ *     builder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * EnhancedBigQueryStorageStubSettings settings = builder.build();
+ * 
+ * 
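+ *
+ * To adjust the retry settings of every unary method at once, the builder also exposes
+ * applyToAllUnaryMethods (defined on the Builder below). A minimal sketch, assuming the
+ * gax ApiFunction lambda form (note the method is declared to throw Exception):
+ *
+ * builder.applyToAllUnaryMethods(
+ *     callSettings -> {
+ *       callSettings.setRetrySettings(
+ *           callSettings.getRetrySettings().toBuilder().setMaxAttempts(5).build());
+ *       return null;
+ *     });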
+ */ +public class EnhancedBigQueryStorageStubSettings + extends StubSettings { + + private final UnaryCallSettings createReadSessionSettings; + private final ServerStreamingCallSettings readRowsSettings; + private final UnaryCallSettings< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings; + private final UnaryCallSettings finalizeStreamSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return batchCreateReadSessionStreamsSettings; + } + + /** Returns the object with the settings used for calls to finalizeStream. */ + public UnaryCallSettings finalizeStreamSettings() { + return finalizeStreamSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryStorageStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return BigQueryStorageStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryStorageStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryStorageStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryStorageStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryStorageStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return new Builder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
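+ * For example, a sketch that derives modified settings from an existing instance:
+ * {@code settings.toBuilder().setEndpoint("bigquerystorage.googleapis.com:443").build()}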
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected EnhancedBigQueryStorageStubSettings(Builder settingsBuilder) { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + batchCreateReadSessionStreamsSettings = + settingsBuilder.batchCreateReadSessionStreamsSettings().build(); + finalizeStreamSettings = settingsBuilder.finalizeStreamSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for {@link EnhancedBigQueryStorageStubSettings}. */ + public static class Builder + extends StubSettings.Builder { + + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder + readRowsSettings; + private final UnaryCallSettings.Builder< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings; + private final UnaryCallSettings.Builder finalizeStreamSettings; + private final UnaryCallSettings.Builder + splitReadStreamSettings; + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + // Defaults provider + BigQueryStorageStubSettings.Builder baseDefaults = BigQueryStorageStubSettings.newBuilder(); + setTransportChannelProvider(defaultTransportChannelProvider()); + setCredentialsProvider(baseDefaults.getCredentialsProvider()); + setStreamWatchdogCheckInterval(baseDefaults.getStreamWatchdogCheckInterval()); + setStreamWatchdogProvider(baseDefaults.getStreamWatchdogProvider()); + + // Per-method settings using baseSettings for defaults. + createReadSessionSettings = baseDefaults.createReadSessionSettings(); + batchCreateReadSessionStreamsSettings = baseDefaults.batchCreateReadSessionStreamsSettings(); + finalizeStreamSettings = baseDefaults.finalizeStreamSettings(); + splitReadStreamSettings = baseDefaults.splitReadStreamSettings(); + + // Per-method settings using override values for defaults. + readRowsSettings = + baseDefaults.readRowsSettings().setResumptionStrategy(new ReadRowsResumptionStrategy()); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, + batchCreateReadSessionStreamsSettings, + finalizeStreamSettings, + splitReadStreamSettings); + } + + protected Builder(EnhancedBigQueryStorageStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + batchCreateReadSessionStreamsSettings = + settings.batchCreateReadSessionStreamsSettings.toBuilder(); + finalizeStreamSettings = settings.finalizeStreamSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, + batchCreateReadSessionStreamsSettings, + finalizeStreamSettings, + splitReadStreamSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to batchCreateReadSessionStreams. */ + public UnaryCallSettings.Builder< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsSettings() { + return batchCreateReadSessionStreamsSettings; + } + + /** Returns the builder for the settings used for calls to finalizeStream. */ + public UnaryCallSettings.Builder finalizeStreamSettings() { + return finalizeStreamSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public EnhancedBigQueryStorageStubSettings build() { + return new EnhancedBigQueryStorageStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java new file mode 100644 index 000000000000..c993ba046ec7 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BigQueryStorage service API. + * + *

This class is for advanced usage. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcBigQueryStorageCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java new file mode 100644 index 000000000000..3434d9e47c4f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java @@ -0,0 +1,337 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta1.Storage; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BigQueryStorage service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcBigQueryStorageStub extends BigQueryStorageStub { + private static final MethodDescriptor + createReadSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession") + .setRequestMarshaller( + ProtoUtils.marshaller(Storage.CreateReadSessionRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(Storage.ReadSession.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + readRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows") + .setRequestMarshaller( + ProtoUtils.marshaller(Storage.ReadRowsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(Storage.ReadRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams") + .setRequestMarshaller( + ProtoUtils.marshaller( + Storage.BatchCreateReadSessionStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller( + Storage.BatchCreateReadSessionStreamsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + finalizeStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream") + .setRequestMarshaller( + ProtoUtils.marshaller(Storage.FinalizeStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + Storage.SplitReadStreamRequest, Storage.SplitReadStreamResponse> + splitReadStreamMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream") + .setRequestMarshaller( + ProtoUtils.marshaller(Storage.SplitReadStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(Storage.SplitReadStreamResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable + createReadSessionCallable; + private final ServerStreamingCallable + readRowsCallable; + private final UnaryCallable< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsCallable; + private final UnaryCallable finalizeStreamCallable; + private final UnaryCallable + splitReadStreamCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryStorageStub 
create(BigQueryStorageStubSettings settings) + throws IOException { + return new GrpcBigQueryStorageStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryStorageStub create(ClientContext clientContext) + throws IOException { + return new GrpcBigQueryStorageStub( + BigQueryStorageStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryStorageStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryStorageStub( + BigQueryStorageStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryStorageStub, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryStorageStub( + BigQueryStorageStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new GrpcBigQueryStorageCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryStorageStub, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryStorageStub( + BigQueryStorageStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + createReadSessionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createReadSessionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "table_reference.dataset_id", + String.valueOf(request.getTableReference().getDatasetId())); + builder.add( + "table_reference.project_id", + String.valueOf(request.getTableReference().getProjectId())); + return builder.build(); + }) + .build(); + GrpcCallSettings readRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "read_position.stream.name", + String.valueOf(request.getReadPosition().getStream().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsTransportSettings = + GrpcCallSettings + . 
+ newBuilder() + .setMethodDescriptor(batchCreateReadSessionStreamsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session.name", String.valueOf(request.getSession().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings finalizeStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(finalizeStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("stream.name", String.valueOf(request.getStream().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + splitReadStreamTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(splitReadStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "original_stream.name", + String.valueOf(request.getOriginalStream().getName())); + return builder.build(); + }) + .build(); + + this.createReadSessionCallable = + callableFactory.createUnaryCallable( + createReadSessionTransportSettings, + settings.createReadSessionSettings(), + clientContext); + this.readRowsCallable = + callableFactory.createServerStreamingCallable( + readRowsTransportSettings, settings.readRowsSettings(), clientContext); + this.batchCreateReadSessionStreamsCallable = + callableFactory.createUnaryCallable( + batchCreateReadSessionStreamsTransportSettings, + settings.batchCreateReadSessionStreamsSettings(), + clientContext); + this.finalizeStreamCallable = + callableFactory.createUnaryCallable( + finalizeStreamTransportSettings, settings.finalizeStreamSettings(), clientContext); + this.splitReadStreamCallable = + callableFactory.createUnaryCallable( + splitReadStreamTransportSettings, settings.splitReadStreamSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable + createReadSessionCallable() { + return createReadSessionCallable; + } + + @Override + public ServerStreamingCallable + readRowsCallable() { + return readRowsCallable; + } + + @Override + public UnaryCallable< + Storage.BatchCreateReadSessionStreamsRequest, + Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreamsCallable() { + return batchCreateReadSessionStreamsCallable; + } + + @Override + public UnaryCallable finalizeStreamCallable() { + return finalizeStreamCallable; + } + + @Override + public UnaryCallable + splitReadStreamCallable() { + return splitReadStreamCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git 
a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ApiResultRetryAlgorithm.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ApiResultRetryAlgorithm.java new file mode 100644 index 000000000000..0b46c429a666 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ApiResultRetryAlgorithm.java @@ -0,0 +1,90 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta1.stub.readrows; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.ApiException; +import com.google.cloud.bigquery.storage.util.Errors; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageSettings; +import io.grpc.Metadata; +import io.grpc.Status; +import java.time.Duration; + +/** For internal use, public for technical reasons. */ +@InternalApi +public class ApiResultRetryAlgorithm implements ResultRetryAlgorithm { + // Duration to sleep on if the error is DEADLINE_EXCEEDED. + public static final Duration DEADLINE_SLEEP_DURATION = Duration.ofMillis(1); + + private final BigQueryStorageSettings.RetryAttemptListener retryAttemptListener; + + public ApiResultRetryAlgorithm() { + this(null); + } + + public ApiResultRetryAlgorithm( + BigQueryStorageSettings.RetryAttemptListener retryAttemptListener) { + super(); + this.retryAttemptListener = retryAttemptListener; + } + + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, ResponseT prevResponse, TimedAttemptSettings prevSettings) { + if (prevThrowable != null) { + Status status = Status.fromThrowable(prevThrowable); + Metadata metadata = Status.trailersFromThrowable(prevThrowable); + Errors.IsRetryableStatusResult result = Errors.isRetryableStatus(status, metadata); + if (result.isRetryable) { + // If result.retryDelay isn't null, we know exactly how long we must wait, so both regular + // and randomized delays are the same. 
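+        // (A non-null retryDelay is a server-provided hint, e.g. parsed from a RetryInfo
+        // detail in the response trailers, and is honored as-is rather than backing off.)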
+ Duration retryDelay = result.retryDelay; + Duration randomizedRetryDelay = result.retryDelay; + if (retryDelay == null) { + retryDelay = prevSettings.getRetryDelayDuration(); + randomizedRetryDelay = DEADLINE_SLEEP_DURATION; + } + if (retryAttemptListener != null) { + retryAttemptListener.onRetryAttempt(status, metadata); + } + return TimedAttemptSettings.newBuilder() + .setGlobalSettings(prevSettings.getGlobalSettings()) + .setRetryDelayDuration(retryDelay) + .setRpcTimeout(prevSettings.getRpcTimeout()) + .setRandomizedRetryDelayDuration(randomizedRetryDelay) + .setAttemptCount(prevSettings.getAttemptCount() + 1) + .setFirstAttemptStartTimeNanos(prevSettings.getFirstAttemptStartTimeNanos()) + .build(); + } + } + return null; + } + + @Override + public boolean shouldRetry(Throwable prevThrowable, ResponseT prevResponse) { + if (prevThrowable != null) { + Status status = Status.fromThrowable(prevThrowable); + Metadata metadata = Status.trailersFromThrowable(prevThrowable); + if (Errors.isRetryableStatus(status, metadata).isRetryable) { + return true; + } + } + return (prevThrowable instanceof ApiException) && ((ApiException) prevThrowable).isRetryable(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsAttemptCallable.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsAttemptCallable.java new file mode 100644 index 000000000000..01f695ec4d7f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsAttemptCallable.java @@ -0,0 +1,326 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.bigquery.storage.v1beta1.stub.readrows;
+
+import com.google.api.core.SettableApiFuture;
+import com.google.api.gax.retrying.RetryingFuture;
+import com.google.api.gax.retrying.ServerStreamingAttemptException;
+import com.google.api.gax.retrying.StreamResumptionStrategy;
+import com.google.api.gax.rpc.ApiCallContext;
+import com.google.api.gax.rpc.ResponseObserver;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.api.gax.rpc.StateCheckingResponseObserver;
+import com.google.api.gax.rpc.StreamController;
+import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse;
+import com.google.common.base.Preconditions;
+import java.time.Duration;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import javax.annotation.concurrent.GuardedBy;
+
+final class ReadRowsAttemptCallable implements Callable<Void> {
+  private final Object lock = new Object();
+
+  private final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> innerCallable;
+  private final StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> resumptionStrategy;
+  private final ReadRowsRequest initialRequest;
+  private ApiCallContext context;
+  private final ResponseObserver<ReadRowsResponse> outerObserver;
+
+  // Start state
+  private boolean autoFlowControl = true;
+  private boolean isStarted;
+
+  // Outer state
+  @GuardedBy("lock")
+  private Throwable cancellationCause;
+
+  @GuardedBy("lock")
+  private int pendingRequests;
+
+  private RetryingFuture<Void> outerRetryingFuture;
+
+  // Internal retry state
+  private int numAttempts;
+
+  @GuardedBy("lock")
+  private StreamController innerController;
+
+  private boolean seenSuccessSinceLastError;
+  private SettableApiFuture<Void> innerAttemptFuture;
+
+  ReadRowsAttemptCallable(
+      ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> innerCallable,
+      StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> resumptionStrategy,
+      ReadRowsRequest initialRequest,
+      ApiCallContext context,
+      ResponseObserver<ReadRowsResponse> outerObserver) {
+    this.innerCallable = innerCallable;
+    this.resumptionStrategy = resumptionStrategy;
+    this.initialRequest = initialRequest;
+    this.context = context;
+    this.outerObserver = outerObserver;
+  }
+
+  /** Sets the controlling {@link RetryingFuture}. Must be called before {@link #start()}. */
+  void setExternalFuture(RetryingFuture<Void> retryingFuture) {
+    Preconditions.checkState(
+        !isStarted, "Can't change the RetryingFuture once the call has started");
+    Preconditions.checkNotNull(retryingFuture, "RetryingFuture can't be null");
+
+    this.outerRetryingFuture = retryingFuture;
+  }
+
+  /**
+   * Starts the initial call. The call is attempted on the caller's thread. Further call attempts
+   * will be scheduled by the {@link RetryingFuture}.
+   */
+  public void start() {
+    Preconditions.checkState(!isStarted, "Already started");
+
+    // Initialize the outer observer
+    outerObserver.onStart(
+        new StreamController() {
+          @Override
+          public void disableAutoInboundFlowControl() {
+            Preconditions.checkState(
+                !isStarted, "Can't disable auto flow control once the stream is started");
+            autoFlowControl = false;
+          }
+
+          @Override
+          public void request(int count) {
+            onRequest(count);
+          }
+
+          @Override
+          public void cancel() {
+            onCancel();
+          }
+        });
+
+    if (autoFlowControl) {
+      synchronized (lock) {
+        pendingRequests = Integer.MAX_VALUE;
+      }
+    }
+    isStarted = true;
+
+    // Propagate the totalTimeout as the overall stream deadline.
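+    // (The timeout bounds the retrying stream as a whole, across resume attempts, rather
+    // than any single ReadRows attempt.)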
+ Duration totalTimeout = + outerRetryingFuture.getAttemptSettings().getGlobalSettings().getTotalTimeoutDuration(); + + if (totalTimeout != null && context != null) { + context = context.withTimeoutDuration(totalTimeout); + } + + // Call the inner callable + call(); + } + + /** + * Sends the actual RPC. The request being sent will first be transformed by the {@link + * StreamResumptionStrategy}. + * + *

This method expects to be called by one thread at a time. Furthermore, it expects that the
+   * current RPC finished before the next time it's called.
+   */
+  @Override
+  public Void call() {
+    Preconditions.checkState(isStarted, "Must be started first");
+
+    ReadRowsRequest request =
+        (++numAttempts == 1)
+            ? initialRequest
+            : resumptionStrategy.getResumeRequest(initialRequest);
+
+    // Should never happen: onAttemptError will check whether the ResumptionStrategy can create a
+    // resume request, which the RetryingFuture/StreamResumptionStrategy should respect.
+    Preconditions.checkState(request != null, "ResumptionStrategy returned a null request.");
+
+    innerAttemptFuture = SettableApiFuture.create();
+    seenSuccessSinceLastError = false;
+
+    ApiCallContext attemptContext = context;
+
+    if (!outerRetryingFuture.getAttemptSettings().getRpcTimeout().isZero()) {
+      attemptContext =
+          attemptContext.withStreamWaitTimeout(
+              outerRetryingFuture.getAttemptSettings().getRpcTimeout());
+    }
+
+    attemptContext
+        .getTracer()
+        .attemptStarted(outerRetryingFuture.getAttemptSettings().getOverallAttemptCount());
+
+    innerCallable.call(
+        request,
+        new StateCheckingResponseObserver<ReadRowsResponse>() {
+          @Override
+          public void onStartImpl(StreamController controller) {
+            onAttemptStart(controller);
+          }
+
+          @Override
+          public void onResponseImpl(ReadRowsResponse response) {
+            onAttemptResponse(response);
+          }
+
+          @Override
+          public void onErrorImpl(Throwable t) {
+            onAttemptError(t);
+          }
+
+          @Override
+          public void onCompleteImpl() {
+            onAttemptComplete();
+          }
+        },
+        attemptContext);
+
+    outerRetryingFuture.setAttemptFuture(innerAttemptFuture);
+
+    return null;
+  }
+
+  /**
+   * Called by the inner {@link ServerStreamingCallable} when the call is about to start. This will
+   * transfer unfinished state from the previous attempt.
+   *
+   * @see ResponseObserver#onStart(StreamController)
+   */
+  private void onAttemptStart(StreamController controller) {
+    if (!autoFlowControl) {
+      controller.disableAutoInboundFlowControl();
+    }
+
+    Throwable localCancellationCause;
+    int numToRequest = 0;
+
+    synchronized (lock) {
+      innerController = controller;
+
+      localCancellationCause = this.cancellationCause;
+
+      if (!autoFlowControl) {
+        numToRequest = pendingRequests;
+      }
+    }
+
+    if (localCancellationCause != null) {
+      controller.cancel();
+    } else if (numToRequest > 0) {
+      controller.request(numToRequest);
+    }
+  }
+
+  /**
+   * Called when the outer {@link ResponseObserver} wants to prematurely cancel the stream.
+   *
+   * @see StreamController#cancel()
+   */
+  private void onCancel() {
+    StreamController localInnerController;
+
+    synchronized (lock) {
+      if (cancellationCause != null) {
+        return;
+      }
+      // NOTE: BasicRetryingFuture will replace j.u.c.CancellationExceptions with its own,
+      // which will not have the current stacktrace, so a special wrapper has to be used here.
+      cancellationCause =
+          new ServerStreamingAttemptException(
+              new CancellationException("User cancelled stream"),
+              resumptionStrategy.canResume(),
+              seenSuccessSinceLastError);
+      localInnerController = innerController;
+    }
+
+    if (localInnerController != null) {
+      localInnerController.cancel();
+    }
+  }
+
+  /**
+   * Called when the outer {@link ResponseObserver} is ready for more data.
+ * + * @see StreamController#request(int) + */ + private void onRequest(int count) { + Preconditions.checkState(!autoFlowControl, "Automatic flow control is enabled"); + Preconditions.checkArgument(count > 0, "Count must be > 0"); + + final StreamController localInnerController; + + synchronized (lock) { + int maxInc = Integer.MAX_VALUE - pendingRequests; + count = Math.min(maxInc, count); + + pendingRequests += count; + localInnerController = this.innerController; + } + + // Note: there is a race condition here where the count might go to the previous attempt's + // StreamController after it failed. But it doesn't matter, because the controller will just + // ignore it and the current controller will pick it up onStart. + if (localInnerController != null) { + localInnerController.request(count); + } + } + + /** Called when the inner callable has responses to deliver. */ + private void onAttemptResponse(ReadRowsResponse message) { + if (!autoFlowControl) { + synchronized (lock) { + pendingRequests--; + } + } + // Update local state to allow for future resume. + seenSuccessSinceLastError = true; + message = resumptionStrategy.processResponse(message); + // Notify the outer observer. + outerObserver.onResponse(message); + } + + /** + * Called when the current RPC fails. The error will be bubbled up to the outer {@link + * RetryingFuture} via the {@link #innerAttemptFuture}. + */ + private void onAttemptError(Throwable throwable) { + Throwable localCancellationCause; + synchronized (lock) { + localCancellationCause = cancellationCause; + } + + if (localCancellationCause != null) { + // Take special care to preserve the cancellation's stack trace. + innerAttemptFuture.setException(localCancellationCause); + } else { + // Wrap the original exception and provide more context for StreamingRetryAlgorithm. + innerAttemptFuture.setException( + new ServerStreamingAttemptException( + throwable, resumptionStrategy.canResume(), seenSuccessSinceLastError)); + } + } + + /** + * Called when the current RPC successfully completes. Notifies the outer {@link RetryingFuture} + * via {@link #innerAttemptFuture}. + */ + private void onAttemptComplete() { + innerAttemptFuture.set(null); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsResumptionStrategy.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsResumptionStrategy.java new file mode 100644 index 000000000000..dfe1cc0b5e5e --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsResumptionStrategy.java @@ -0,0 +1,74 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta1.stub.readrows; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import javax.annotation.Nonnull; + +/** + * An implementation of a {@link StreamResumptionStrategy} for the ReadRows API. This class tracks + * the offset of the last row received and, upon retry, attempts to resume the stream at the next + * offset. + * + *

This class is considered an internal implementation detail and not meant to be used by + * applications. + */ +@InternalApi +public class ReadRowsResumptionStrategy + implements StreamResumptionStrategy { + + // Number of rows processed. + private long rowsProcessed = 0; + + @Override + @Nonnull + public StreamResumptionStrategy createNew() { + return new ReadRowsResumptionStrategy(); + } + + @Override + @Nonnull + public ReadRowsResponse processResponse(ReadRowsResponse response) { + rowsProcessed += response.getRowCount(); + return response; + } + + /** + * {@inheritDoc} + * + *

Given the initial/original request, this implementation generates a request that will yield + * a new stream whose first response would come right after the last response received by + * processResponse. It takes into account the offset from the original request. + */ + @Override + public ReadRowsRequest getResumeRequest(ReadRowsRequest originalRequest) { + ReadRowsRequest.Builder resumeRequestBuilder = originalRequest.toBuilder(); + + resumeRequestBuilder + .getReadPositionBuilder() + .setOffset(originalRequest.getReadPosition().getOffset() + rowsProcessed); + + return resumeRequestBuilder.build(); + } + + @Override + public boolean canResume() { + return true; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryingCallable.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryingCallable.java new file mode 100644 index 000000000000..a7fffccf25d8 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryingCallable.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta1.stub.readrows; + +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; + +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.ScheduledRetryingExecutor; +import com.google.api.gax.retrying.ServerStreamingAttemptException; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; + +public final class ReadRowsRetryingCallable + extends ServerStreamingCallable { + + private final ApiCallContext context; + private final ServerStreamingCallable innerCallable; + private final ScheduledRetryingExecutor executor; + private final StreamResumptionStrategy + resumptionStrategyPrototype; + + public ReadRowsRetryingCallable( + ApiCallContext context, + ServerStreamingCallable innerCallable, + ScheduledRetryingExecutor executor, + StreamResumptionStrategy resumptionStrategyPrototype) { + this.context = context; + this.innerCallable = innerCallable; + this.executor = executor; + this.resumptionStrategyPrototype = resumptionStrategyPrototype; + } + + @Override + public void call( + ReadRowsRequest request, + final ResponseObserver responseObserver, + ApiCallContext context) { + ApiCallContext actualContext = this.context.merge(context); + ReadRowsAttemptCallable attemptCallable = + new ReadRowsAttemptCallable( + innerCallable, + resumptionStrategyPrototype.createNew(), + request, + actualContext, + responseObserver); + + RetryingFuture retryingFuture = executor.createFuture(attemptCallable, actualContext); + attemptCallable.setExternalFuture(retryingFuture); + attemptCallable.start(); + + // Bridge the future result back to the external responseObserver + ApiFutures.addCallback( + retryingFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + // Make sure to unwrap the underlying ApiException + if (throwable instanceof ServerStreamingAttemptException) { + throwable = throwable.getCause(); + } + responseObserver.onError(throwable); + } + + @Override + public void onSuccess(Void ignored) { + responseObserver.onComplete(); + } + }, + directExecutor()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/package-info.java new file mode 100644 index 000000000000..91f83b51362c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/package-info.java @@ -0,0 +1,16 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1beta1.stub.readrows;
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptor.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptor.java
new file mode 100644
index 000000000000..05b6ac0611c5
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptor.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1beta2;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.protobuf.DescriptorProtos.DescriptorProto;
+import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
+import com.google.protobuf.DescriptorProtos.FileDescriptorProto;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Descriptors.Descriptor;
+import com.google.protobuf.Descriptors.FileDescriptor;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+/**
+ * Converts a BQ table schema to a protobuf descriptor. All field names will be converted to
+ * lowercase when constructing the protobuf descriptor. The mapping between field types and field
+ * modes is shown in the ImmutableMaps below.
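+ *
+ * A minimal usage sketch (schema values are illustrative; the call declares
+ * Descriptors.DescriptorValidationException):
+ *
+ * TableSchema schema =
+ *     TableSchema.newBuilder()
+ *         .addFields(
+ *             TableFieldSchema.newBuilder()
+ *                 .setName("Name")
+ *                 .setType(TableFieldSchema.Type.STRING)
+ *                 .setMode(TableFieldSchema.Mode.REQUIRED))
+ *         .build();
+ * // Yields a descriptor whose single field is a required string named "name".
+ * Descriptor descriptor =
+ *     BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(schema);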
+ */ +@Deprecated +public class BQTableSchemaToProtoDescriptor { + private static ImmutableMap + BQTableSchemaModeMap = + ImmutableMap.of( + TableFieldSchema.Mode.NULLABLE, FieldDescriptorProto.Label.LABEL_OPTIONAL, + TableFieldSchema.Mode.REPEATED, FieldDescriptorProto.Label.LABEL_REPEATED, + TableFieldSchema.Mode.REQUIRED, FieldDescriptorProto.Label.LABEL_REQUIRED); + + private static ImmutableMap + BQTableSchemaTypeMap = + new ImmutableMap.Builder() + .put(TableFieldSchema.Type.BOOL, FieldDescriptorProto.Type.TYPE_BOOL) + .put(TableFieldSchema.Type.BYTES, FieldDescriptorProto.Type.TYPE_BYTES) + .put(TableFieldSchema.Type.DATE, FieldDescriptorProto.Type.TYPE_INT32) + .put(TableFieldSchema.Type.DATETIME, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.DOUBLE, FieldDescriptorProto.Type.TYPE_DOUBLE) + .put(TableFieldSchema.Type.GEOGRAPHY, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.INT64, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.NUMERIC, FieldDescriptorProto.Type.TYPE_BYTES) + .put(TableFieldSchema.Type.STRING, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.STRUCT, FieldDescriptorProto.Type.TYPE_MESSAGE) + .put(TableFieldSchema.Type.TIME, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.TIMESTAMP, FieldDescriptorProto.Type.TYPE_INT64) + .put(TableFieldSchema.Type.JSON, FieldDescriptorProto.Type.TYPE_STRING) + .put(TableFieldSchema.Type.INTERVAL, FieldDescriptorProto.Type.TYPE_STRING) + .build(); + + /** + * Converts TableFieldSchema to a Descriptors.Descriptor object. + * + * @param BQTableSchema + * @throws Descriptors.DescriptorValidationException + */ + public static Descriptor convertBQTableSchemaToProtoDescriptor(TableSchema BQTableSchema) + throws Descriptors.DescriptorValidationException { + Preconditions.checkNotNull(BQTableSchema, "BQTableSchema is null."); + return convertBQTableSchemaToProtoDescriptorImpl( + BQTableSchema, "root", new HashMap, Descriptor>()); + } + + /** + * Converts a TableFieldSchema to a Descriptors.Descriptor object. + * + * @param BQTableSchema + * @param scope Keeps track of current scope to prevent repeated naming while constructing + * descriptor. 
+ * @param dependencyMap Stores already constructed descriptors to prevent reconstruction + * @throws Descriptors.DescriptorValidationException + */ + private static Descriptor convertBQTableSchemaToProtoDescriptorImpl( + TableSchema BQTableSchema, + String scope, + HashMap, Descriptor> dependencyMap) + throws Descriptors.DescriptorValidationException { + List dependenciesList = new ArrayList(); + List fields = new ArrayList(); + int index = 1; + for (TableFieldSchema BQTableField : BQTableSchema.getFieldsList()) { + String currentScope = scope + "__" + BQTableField.getName(); + if (BQTableField.getType() == TableFieldSchema.Type.STRUCT) { + ImmutableList fieldList = + ImmutableList.copyOf(BQTableField.getFieldsList()); + if (dependencyMap.containsKey(fieldList)) { + Descriptor descriptor = dependencyMap.get(fieldList); + dependenciesList.add(descriptor.getFile()); + fields.add(convertBQTableFieldToProtoField(BQTableField, index++, descriptor.getName())); + } else { + Descriptor descriptor = + convertBQTableSchemaToProtoDescriptorImpl( + TableSchema.newBuilder().addAllFields(fieldList).build(), + currentScope, + dependencyMap); + dependenciesList.add(descriptor.getFile()); + dependencyMap.put(fieldList, descriptor); + fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope)); + } + } else { + fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope)); + } + } + FileDescriptor[] dependenciesArray = new FileDescriptor[dependenciesList.size()]; + dependenciesArray = dependenciesList.toArray(dependenciesArray); + DescriptorProto descriptorProto = + DescriptorProto.newBuilder().setName(scope).addAllField(fields).build(); + FileDescriptorProto fileDescriptorProto = + FileDescriptorProto.newBuilder().addMessageType(descriptorProto).build(); + FileDescriptor fileDescriptor = + FileDescriptor.buildFrom(fileDescriptorProto, dependenciesArray); + Descriptor descriptor = fileDescriptor.findMessageTypeByName(scope); + return descriptor; + } + + /** + * Converts a BQTableField to ProtoField + * + * @param BQTableField BQ Field used to construct a FieldDescriptorProto + * @param index Index for protobuf fields. 
+ * @param scope used to name descriptors + */ + private static FieldDescriptorProto convertBQTableFieldToProtoField( + TableFieldSchema BQTableField, int index, String scope) { + TableFieldSchema.Mode mode = BQTableField.getMode(); + String fieldName = BQTableField.getName().toLowerCase(); + if (BQTableField.getType() == TableFieldSchema.Type.STRUCT) { + return FieldDescriptorProto.newBuilder() + .setName(fieldName) + .setTypeName(scope) + .setLabel((FieldDescriptorProto.Label) BQTableSchemaModeMap.get(mode)) + .setNumber(index) + .build(); + } + return FieldDescriptorProto.newBuilder() + .setName(fieldName) + .setType((FieldDescriptorProto.Type) BQTableSchemaTypeMap.get(BQTableField.getType())) + .setLabel((FieldDescriptorProto.Label) BQTableSchemaModeMap.get(mode)) + .setNumber(index) + .build(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java new file mode 100644 index 000000000000..5f7328eeac2b --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java @@ -0,0 +1,559 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryReadStub; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryReadStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery Read API. + * + *

The Read API can be used to read data from BigQuery. + * + *

New code should use the v1 Read API going forward, if it doesn't use the Write API at the same + * time. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 940837515;
+ *   ReadSession response =
+ *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * }
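+ *
+ * <p>A slightly fuller sketch than the template above: it creates a read session over a concrete
+ * table and drains the first stream. This is an illustrative, hedged example, not part of the
+ * generated surface; the project, dataset, and table names are placeholders, and it assumes the
+ * table is non-empty so at least one stream is returned.
+ *
+ * <pre>{@code
+ * try (BaseBigQueryReadClient client = BaseBigQueryReadClient.create()) {
+ *   int maxStreamCount = 1;
+ *   ReadSession session =
+ *       client.createReadSession(
+ *           ProjectName.of("my-project"),
+ *           ReadSession.newBuilder()
+ *               .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+ *               .setDataFormat(DataFormat.AVRO)
+ *               .build(),
+ *           maxStreamCount);
+ *   ReadRowsRequest request =
+ *       ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+ *   for (ReadRowsResponse response : client.readRowsCallable().call(request)) {
+ *     // Each response carries an encoded row block plus current stream statistics.
+ *   }
+ * }
+ * }</pre>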
+ * + *

Note: close() needs to be called on the BaseBigQueryReadClient object to clean up resources
+ * such as threads. In the example above, try-with-resources is used, which automatically calls
+ * close().
+ *
+ * <table>
+ *   <caption>Methods</caption>
+ *   <tr>
+ *     <th>Method</th>
+ *     <th>Description</th>
+ *     <th>Method Variants</th>
+ *   </tr>
+ *   <tr>
+ *     <td><p> CreateReadSession</td>
+ *     <td><p> Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned.
+ * <p> A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read.
+ * <p> Data is assigned to each stream such that roughly the same number of rows can be read from each stream. Because the server-side unit for assigning data is collections of rows, the API does not guarantee that each stream will return the same number of rows. Additionally, the limits are enforced based on the number of pre-filtered rows, so some filters can lead to lopsided assignments.
+ * <p> Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.</p>
+ *     <ul>
+ *       <li><p> createReadSession(CreateReadSessionRequest request)
+ *     </ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.</p>
+ *     <ul>
+ *       <li><p> createReadSession(ProjectName parent, ReadSession readSession, int maxStreamCount)
+ *       <li><p> createReadSession(String parent, ReadSession readSession, int maxStreamCount)
+ *     </ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *     <ul>
+ *       <li><p> createReadSessionCallable()
+ *     </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> ReadRows</td>
+ *     <td><p> Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to read individual rows larger than 100 MiB will fail.
+ * <p> Each request also returns a set of stream statistics reflecting the current state of the stream.</td>
+ *     <td>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *     <ul>
+ *       <li><p> readRowsCallable()
+ *     </ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> SplitReadStream</td>
+ *     <td><p> Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are referred to as the primary and the residual streams of the split. The original `ReadStream` can still be read from in the same manner as before. Both of the returned `ReadStream` objects can also be read from, and the rows returned by both child streams will be the same as the rows read from the original stream.
+ * <p> Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. Concretely, it is guaranteed that, for streams original, primary, and residual, original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read to completion.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.</p>
+ *     <ul>
+ *       <li><p> splitReadStream(SplitReadStreamRequest request)
+ *     </ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.</p>
+ *     <ul>
+ *       <li><p> splitReadStreamCallable()
+ *     </ul>
+ *     </td>
+ *   </tr>
+ * </table>

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BaseBigQueryReadSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryReadSettings baseBigQueryReadSettings =
+ *     BaseBigQueryReadSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BaseBigQueryReadClient baseBigQueryReadClient =
+ *     BaseBigQueryReadClient.create(baseBigQueryReadSettings);
+ * }
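+ *
+ * <p>The myCredentials variable above is left abstract by the generated snippet. One hedged way
+ * to obtain it, assuming a service-account key file at a placeholder path (this uses
+ * com.google.auth.oauth2.GoogleCredentials and java.io.FileInputStream):
+ *
+ * <pre>{@code
+ * GoogleCredentials myCredentials =
+ *     GoogleCredentials.fromStream(new FileInputStream("/path/to/service-account.json"))
+ *         .createScoped(BaseBigQueryReadSettings.getDefaultServiceScopes());
+ * }</pre>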
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryReadSettings baseBigQueryReadSettings =
+ *     BaseBigQueryReadSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BaseBigQueryReadClient baseBigQueryReadClient =
+ *     BaseBigQueryReadClient.create(baseBigQueryReadSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class BaseBigQueryReadClient implements BackgroundResource { + private final BaseBigQueryReadSettings settings; + private final BigQueryReadStub stub; + + /** Constructs an instance of BaseBigQueryReadClient with default settings. */ + public static final BaseBigQueryReadClient create() throws IOException { + return create(BaseBigQueryReadSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BaseBigQueryReadClient create(BaseBigQueryReadSettings settings) + throws IOException { + return new BaseBigQueryReadClient(settings); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given stub for making calls. This + * is for advanced usage - prefer using create(BaseBigQueryReadSettings). + */ + public static final BaseBigQueryReadClient create(BigQueryReadStub stub) { + return new BaseBigQueryReadClient(stub); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BaseBigQueryReadClient(BaseBigQueryReadSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryReadStubSettings) settings.getStubSettings()).createStub(); + } + + protected BaseBigQueryReadClient(BigQueryReadStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BaseBigQueryReadSettings getSettings() { + return settings; + } + + public BigQueryReadStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 940837515;
+   *   ReadSession response =
+   *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * }
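+   *
+   * <p>The empty ReadSession in the template reads nothing useful; in practice the session names
+   * a table and, optionally, read options. A hedged sketch (the table path and filter below are
+   * placeholders, not values from this library):
+   *
+   * <pre>{@code
+   * ReadSession readSession =
+   *     ReadSession.newBuilder()
+   *         .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+   *         .setDataFormat(DataFormat.ARROW)
+   *         .setReadOptions(
+   *             ReadSession.TableReadOptions.newBuilder()
+   *                 .addSelectedFields("word")
+   *                 .addSelectedFields("word_count")
+   *                 .setRowRestriction("word_count > 10")
+   *                 .build())
+   *         .build();
+   * }</pre>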
+ * + * @param parent Required. The request project that owns the session, in the form of + * `projects/{project_id}`. + * @param readSession Required. Session to be created. + * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide + * a value of streams so as to produce reasonable throughput. Must be non-negative. The number + * of streams may be lower than the requested number, depending on the amount of parallelism that + * is reasonable for the table. An error will be returned if the max count is greater than the + * current system max limit of 1,000. + *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + ProjectName parent, ReadSession readSession, int maxStreamCount) { + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 940837515;
+   *   ReadSession response =
+   *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * }
+ * + * @param parent Required. The request project that owns the session, in the form of + * `projects/{project_id}`. + * @param readSession Required. Session to be created. + * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide + * a value of streams so as to produce reasonable throughput. Must be non-negative. The number + * of streams may be lower than the requested number, depending on the amount of parallelism that + * is reasonable for the table. An error will be returned if the max count is greater than the + * current system max limit of 1,000. + *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + String parent, ReadSession readSession, int maxStreamCount) { + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request =
+   *       CreateReadSessionRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setReadSession(ReadSession.newBuilder().build())
+   *           .setMaxStreamCount(940837515)
+   *           .build();
+   *   ReadSession response = baseBigQueryReadClient.createReadSession(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession(CreateReadSessionRequest request) { + return createReadSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 6 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request =
+   *       CreateReadSessionRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setReadSession(ReadSession.newBuilder().build())
+   *           .setMaxStreamCount(940837515)
+   *           .build();
+   *   ApiFuture<ReadSession> future =
+   *       baseBigQueryReadClient.createReadSessionCallable().futureCall(request);
+   *   // Do something.
+   *   ReadSession response = future.get();
+   * }
+   * }
+ * */ + public final UnaryCallable<CreateReadSessionRequest, ReadSession> createReadSessionCallable() { + return stub.createReadSessionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Reads rows from the stream in the format prescribed by the ReadSession. Each response contains + * one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to + * read individual rows larger than 100 MiB will fail. + * + *

Each request also returns a set of stream statistics reflecting the current state of the + * stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   ReadRowsRequest request =
+   *       ReadRowsRequest.newBuilder()
+   *           .setReadStream(
+   *               ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString())
+   *           .setOffset(-1019779949)
+   *           .build();
+   *   ServerStream<ReadRowsResponse> stream =
+   *       baseBigQueryReadClient.readRowsCallable().call(request);
+   *   for (ReadRowsResponse response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ * */ + public final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> readRowsCallable() { + return stub.readRowsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that, for streams original, primary, and residual, + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request =
+   *       SplitReadStreamRequest.newBuilder()
+   *           .setName(
+   *               ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString())
+   *           .setFraction(-1653751294)
+   *           .build();
+   *   SplitReadStreamResponse response = baseBigQueryReadClient.splitReadStream(request);
+   * }
+   * }
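+   *
+   * <p>The negative fraction in the template is only a placeholder; fraction is a value in
+   * (0, 1) indicating approximately where to split. A hedged sketch that splits an existing
+   * stream roughly in half (here `stream` is assumed to be a ReadStream from an earlier session):
+   *
+   * <pre>{@code
+   * SplitReadStreamRequest request =
+   *     SplitReadStreamRequest.newBuilder().setName(stream.getName()).setFraction(0.5).build();
+   * SplitReadStreamResponse response = baseBigQueryReadClient.splitReadStream(request);
+   * ReadStream primary = response.getPrimaryStream();
+   * ReadStream residual = response.getRemainderStream();
+   * }</pre>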
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SplitReadStreamResponse splitReadStream(SplitReadStreamRequest request) { + return splitReadStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that, for streams original, primary, and residual, + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request =
+   *       SplitReadStreamRequest.newBuilder()
+   *           .setName(
+   *               ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString())
+   *           .setFraction(-1653751294)
+   *           .build();
+   *   ApiFuture<SplitReadStreamResponse> future =
+   *       baseBigQueryReadClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something.
+   *   SplitReadStreamResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java new file mode 100644 index 000000000000..63dadc9d0a73 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java @@ -0,0 +1,225 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryReadStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BaseBigQueryReadClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createReadSession: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BaseBigQueryReadSettings.Builder baseBigQueryReadSettingsBuilder =
+ *     BaseBigQueryReadSettings.newBuilder();
+ * baseBigQueryReadSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryReadSettingsBuilder
+ *             .createReadSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BaseBigQueryReadSettings baseBigQueryReadSettings = baseBigQueryReadSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class BaseBigQueryReadSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).readRowsSettings(); + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).splitReadStreamSettings(); + } + + public static final BaseBigQueryReadSettings create(BigQueryReadStubSettings stub) + throws IOException { + return new BaseBigQueryReadSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryReadStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryReadStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryReadStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryReadStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BaseBigQueryReadSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BaseBigQueryReadSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryReadStubSettings.newBuilder(clientContext)); + } + + protected Builder(BaseBigQueryReadSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryReadStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BigQueryReadStubSettings.newBuilder()); + } + + public BigQueryReadStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryReadStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return getStubSettingsBuilder().createReadSessionSettings(); + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return getStubSettingsBuilder().readRowsSettings(); + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return getStubSettingsBuilder().splitReadStreamSettings(); + } + + @Override + public BaseBigQueryReadSettings build() throws IOException { + return new BaseBigQueryReadSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoder.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoder.java new file mode 100644 index 000000000000..0110f1f60a8a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoder.java @@ -0,0 +1,88 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * This code was ported from ZetaSQL and can be found here: + * https://github.com/google/zetasql/blob/c55f967a5ae35b476437210c529691d8a73f5507/java/com/google/zetasql/Value.java + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.common.primitives.Bytes; +import com.google.protobuf.ByteString; +import java.math.BigDecimal; +import java.math.BigInteger; + +public class BigDecimalByteStringEncoder { + private static int NumericScale = 9; + private static final BigDecimal MAX_NUMERIC_VALUE = + new BigDecimal("99999999999999999999999999999.999999999"); + private static final BigDecimal MIN_NUMERIC_VALUE = + new BigDecimal("-99999999999999999999999999999.999999999"); + + public static ByteString encodeToNumericByteString(BigDecimal bigDecimal) { + ByteString byteString = + serializeBigDecimal( + bigDecimal, NumericScale, MAX_NUMERIC_VALUE, MIN_NUMERIC_VALUE, "ByteString"); + return byteString; + } + + public static BigDecimal decodeNumericByteString(ByteString byteString) { + BigDecimal bigDecimal = + deserializeBigDecimal( + byteString, NumericScale, MAX_NUMERIC_VALUE, MIN_NUMERIC_VALUE, "BigDecimal"); + return bigDecimal; + } + + // Make these private and make public wrapper that internalizes these min/max/scale/type + private static BigDecimal deserializeBigDecimal( + ByteString serializedValue, + int scale, + BigDecimal maxValue, + BigDecimal minValue, + String typeName) { + byte[] bytes = serializedValue.toByteArray(); + // NUMERIC/BIGNUMERIC values are serialized as scaled integers in two's complement form in + // little endian order. BigInteger requires the same encoding but in big endian order, + // therefore we must reverse the bytes that come from the proto. + Bytes.reverse(bytes); + BigInteger scaledValue = new BigInteger(bytes); + BigDecimal decimalValue = new BigDecimal(scaledValue, scale); + if (decimalValue.compareTo(maxValue) > 0 || decimalValue.compareTo(minValue) < 0) { + throw new IllegalArgumentException(typeName + " overflow: " + decimalValue.toPlainString()); + } + return decimalValue; + } + + /** Returns a numeric Value that equals to {@code v}. */ + private static ByteString serializeBigDecimal( + BigDecimal v, int scale, BigDecimal maxValue, BigDecimal minValue, String typeName) { + if (v.scale() > scale) { + throw new IllegalArgumentException( + typeName + " scale cannot exceed " + scale + ": " + v.toPlainString()); + } + if (v.compareTo(maxValue) > 0 || v.compareTo(minValue) < 0) { + throw new IllegalArgumentException(typeName + " overflow: " + v.toPlainString()); + } + byte[] bytes = v.setScale(scale).unscaledValue().toByteArray(); + // NUMERIC/BIGNUMERIC values are serialized as scaled integers in two's complement form in + // little endian + // order. BigInteger requires the same encoding but in big endian order, therefore we must + // reverse the bytes that come from the proto. 
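+    // Worked example (a hypothetical value, not taken from this file): new BigDecimal("1.5")
+    // at scale 9 has unscaled value 1500000000 (hex 0x59682F00), so toByteArray() yields the
+    // big-endian bytes [0x59, 0x68, 0x2F, 0x00]; the reverse below produces the little-endian
+    // wire form [0x00, 0x2F, 0x68, 0x59] expected for NUMERIC.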
+ Bytes.reverse(bytes); + return ByteString.copyFrom(bytes); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClient.java new file mode 100644 index 000000000000..37fca03d1b9d --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClient.java @@ -0,0 +1,378 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.stub.EnhancedBigQueryReadStub; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * Service Description: BigQuery Read API. + * + *

The Read API can be used to read data from BigQuery. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+ *   String parent = "";
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 0;
+ *   ReadSession response = bigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * 
+ * 
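+ *
+ * <p>ReadRowsRequest also carries an offset field, which the samples in this file leave at zero.
+ * A hedged sketch of resuming a stream after a consumer failure (the stream name and resume point
+ * are assumed to be tracked by the caller; they are not provided by this class):
+ *
+ * <pre><code>
+ * long resumeOffset = 1000L; // rows [0, 1000) of this stream were already processed
+ * ReadRowsRequest request =
+ *     ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(resumeOffset).build();
+ * for (ReadRowsResponse response : bigQueryReadClient.readRowsCallable().call(request)) {
+ *   // Processing resumes at row `resumeOffset`; earlier rows are not re-sent.
+ * }
+ * </code></pre>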
+ * + *

Note: close() needs to be called on the BigQueryReadClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BigQueryReadSettings to + * create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * BigQueryReadSettings bigQueryReadSettings =
+ *     BigQueryReadSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryReadClient bigQueryReadClient =
+ *     BigQueryReadClient.create(bigQueryReadSettings);
+ * 
+ * 
+ * + * To customize the endpoint: + * + *
+ * 
+ * BigQueryReadSettings bigQueryReadSettings =
+ *     BigQueryReadSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryReadClient bigQueryReadClient =
+ *     BigQueryReadClient.create(bigQueryReadSettings);
+ * 
+ * 
+ */
+@BetaApi
+public class BigQueryReadClient implements BackgroundResource {
+  private final BigQueryReadSettings settings;
+  private final EnhancedBigQueryReadStub stub;
+
+  /** Constructs an instance of BigQueryReadClient with default settings. */
+  public static final BigQueryReadClient create() throws IOException {
+    return create(BigQueryReadSettings.newBuilder().build());
+  }
+
+  /**
+   * Constructs an instance of BigQueryReadClient, using the given settings. The channels are
+   * created based on the settings passed in, or defaults for any settings that are not set.
+   */
+  public static final BigQueryReadClient create(BigQueryReadSettings settings) throws IOException {
+    return new BigQueryReadClient(settings);
+  }
+
+  /**
+   * Constructs an instance of BigQueryReadClient, using the given stub for making calls. This is
+   * for advanced usage - prefer to use {@link BigQueryReadSettings}.
+   */
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public static final BigQueryReadClient create(EnhancedBigQueryReadStub stub) {
+    return new BigQueryReadClient(stub);
+  }
+
+  /**
+   * Constructs an instance of BigQueryReadClient, using the given settings. This is protected so
+   * that it is easy to make a subclass, but otherwise, the static factory methods should be
+   * preferred.
+   */
+  protected BigQueryReadClient(BigQueryReadSettings settings) throws IOException {
+    this.settings = settings;
+    this.stub =
+        EnhancedBigQueryReadStub.create(
+            settings.getTypedStubSettings(), settings.getReadRowsRetryAttemptListener());
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  protected BigQueryReadClient(EnhancedBigQueryReadStub stub) {
+    this.settings = null;
+    this.stub = stub;
+  }
+
+  public final BigQueryReadSettings getSettings() {
+    return settings;
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public EnhancedBigQueryReadStub getStub() {
+    return stub;
+  }
+
+  /**
+   * Creates a new read session. A read session divides the contents of a BigQuery table into one
+   * or more streams, which can then be used to read data from the table. The read session also
+   * specifies properties of the data to be read, such as a list of columns or a push-down filter
+   * describing the rows to be returned.
+   *
+   * 

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   String parent = "";
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 0;
+   *   ReadSession response = bigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * 
+ * + * @param parent Required. The request project that owns the session, in the form of + * `projects/{project_id}`. + * @param readSession Required. Session to be created. + * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide + * a value of streams so as to produce reasonable throughput. Must be non-negative. The number + * of streams may be lower than the requested number, depending on the amount of parallelism that + * is reasonable for the table. An error will be returned if the max count is greater than the + * current system max limit of 1,000. + *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + String parent, ReadSession readSession, int maxStreamCount) { + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder().build();
+   *   ReadSession response = bigQueryReadClient.createReadSession(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession(CreateReadSessionRequest request) { + return createReadSessionCallable().call(request); + } + + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

Data is assigned to each stream such that roughly the same number of rows can be read from + * each stream. Because the server-side unit for assigning data is collections of rows, the API + * does not guarantee that each stream will return the same number of rows. Additionally, the + * limits are enforced based on the number of pre-filtered rows, so some filters can lead to + * lopsided assignments. + * + *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder().build();
+   *   ApiFuture<ReadSession> future = bigQueryReadClient.createReadSessionCallable().futureCall(request);
+   *   // Do something
+   *   ReadSession response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable<CreateReadSessionRequest, ReadSession> createReadSessionCallable() { + return stub.createReadSessionCallable(); + } + + /** + * Reads rows from the stream in the format prescribed by the ReadSession. Each response contains + * one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to + * read individual rows larger than 100 MiB will fail. + * + *

Each request also returns a set of stream statistics reflecting the current state of the + * stream. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
+   *
+   *   ServerStream<ReadRowsResponse> stream = bigQueryReadClient.readRowsCallable().call(request);
+   *   for (ReadRowsResponse response : stream) {
+   *     // Do something when a response is received
+   *   }
+   * }
+   * 
+ */ + public final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> readRowsCallable() { + return stub.readRowsCallable(); + } + + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that, for streams original, primary, and residual, + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder().build();
+   *   SplitReadStreamResponse response = bigQueryReadClient.splitReadStream(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SplitReadStreamResponse splitReadStream(SplitReadStreamRequest request) { + return splitReadStreamCallable().call(request); + } + + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. + * Concretely, it is guaranteed that, for streams original, primary, and residual, + * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read + * to completion. + * + *

Sample code: + * + *


+   * try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder().build();
+   *   ApiFuture<SplitReadStreamResponse> future = bigQueryReadClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something
+   *   SplitReadStreamResponse response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadSettings.java new file mode 100644 index 000000000000..19704a99c5cb --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadSettings.java @@ -0,0 +1,234 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.stub.EnhancedBigQueryReadStubSettings; +import io.grpc.Metadata; +import io.grpc.Status; +import java.io.IOException; +import java.util.List; + +/** + * Settings class to configure an instance of {@link BigQueryReadClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createReadSession to 30 seconds: + * + *

+ * 
+ * BigQueryReadSettings.Builder bigQueryReadSettingsBuilder = BigQueryReadSettings.newBuilder();
+ * bigQueryReadSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         bigQueryReadSettingsBuilder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * BigQueryReadSettings bigQueryReadSettings = bigQueryReadSettingsBuilder.build();
+ * 
+ * 
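Once built, the settings are handed to the client factory; a minimal follow-up sketch, assuming the create(BigQueryReadSettings) factory on the BigQueryReadClient added earlier in this change:

    try (BigQueryReadClient bigQueryReadClient = BigQueryReadClient.create(bigQueryReadSettings)) {
      // createReadSession calls issued here honor the 30-second total timeout.
    }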
+ */ +@BetaApi +public class BigQueryReadSettings extends ClientSettings { + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return getTypedStubSettings().createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return getTypedStubSettings().readRowsSettings(); + } + + public static interface RetryAttemptListener { + public void onRetryAttempt(Status prevStatus, Metadata prevMetadata); + } + + private RetryAttemptListener readRowsRetryAttemptListener = null; + + /** + * If a non null readRowsRetryAttemptListener is provided, client will call onRetryAttempt + * function before a failed ReadRows request is retried. This can be used as negative feedback + * mechanism for future decision to split read streams because some retried failures are due to + * resource exhaustion that increased parallelism only makes it worse. + */ + public void setReadRowsRetryAttemptListener(RetryAttemptListener readRowsRetryAttemptListener) { + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + } + + public RetryAttemptListener getReadRowsRetryAttemptListener() { + return readRowsRetryAttemptListener; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return getTypedStubSettings().splitReadStreamSettings(); + } + + EnhancedBigQueryReadStubSettings getTypedStubSettings() { + return (EnhancedBigQueryReadStubSettings) getStubSettings(); + } + + public static final BigQueryReadSettings create(EnhancedBigQueryReadStubSettings stub) + throws IOException { + return new BigQueryReadSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return EnhancedBigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return EnhancedBigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return EnhancedBigQueryReadStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return EnhancedBigQueryReadStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return EnhancedBigQueryReadStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return EnhancedBigQueryReadStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return EnhancedBigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. 
*/ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryReadSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BigQueryReadSettings. */ + public static class Builder extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(EnhancedBigQueryReadStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(EnhancedBigQueryReadStubSettings.newBuilder()); + } + + protected Builder(BigQueryReadSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(EnhancedBigQueryReadStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public EnhancedBigQueryReadStubSettings.Builder getStubSettingsBuilder() { + return ((EnhancedBigQueryReadStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + private RetryAttemptListener readRowsRetryAttemptListener = null; + + public Builder setReadRowsRetryAttemptListener( + RetryAttemptListener readRowsRetryAttemptListener) { + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + return this; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return getStubSettingsBuilder().createReadSessionSettings(); + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return getStubSettingsBuilder().readRowsSettings(); + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return getStubSettingsBuilder().splitReadStreamSettings(); + } + + @Override + public BigQueryReadSettings build() throws IOException { + BigQueryReadSettings settings = new BigQueryReadSettings(this); + settings.setReadRowsRetryAttemptListener(readRowsRetryAttemptListener); + return settings; + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java new file mode 100644 index 000000000000..d9937cfb43f8 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java @@ -0,0 +1,986 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryWriteStub; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: BigQuery Write API. + * + *

The Write API can be used to write data to BigQuery. + * + *

The [google.cloud.bigquery.storage.v1 + * API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1) should be used + * instead of the v1beta2 API for BigQueryWrite operations. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream writeStream = WriteStream.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the BigQueryWriteClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
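Where try-with-resources is not practical, the BackgroundResource methods implemented at the bottom of this class allow manual cleanup; a minimal sketch using only the methods shown in this file:

    BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create();
    try {
      // ... issue RPCs ...
    } finally {
      bigQueryWriteClient.shutdown();
      if (!bigQueryWriteClient.awaitTermination(30, TimeUnit.SECONDS)) {
        bigQueryWriteClient.shutdownNow(); // force termination if graceful shutdown stalls
      }
    }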
Methods
Method | Description | Method Variants

CreateWriteStream

Creates a write stream to the given table. Additionally, every table has a special COMMITTED stream named '_default' to which data can be written. This stream doesn't need to be created using CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. Data written to this stream is considered committed as soon as an acknowledgement is received.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createWriteStream(CreateWriteStreamRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createWriteStream(TableName parent, WriteStream writeStream) + *

  • createWriteStream(String parent, WriteStream writeStream) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createWriteStreamCallable() + *

+ *

AppendRows

Appends data to the given stream. + *

If `offset` is specified, the `offset` is checked against the end of the stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset beyond the current end of the stream, or `ALREADY_EXISTS` if the user provides an `offset` that has already been written to. The user can retry with an adjusted offset within the same RPC stream. If `offset` is not specified, the append happens at the end of the stream. + *

The response contains the offset at which the append happened. Responses are received in the same order in which requests are sent. There will be one response for each successful request. If the `offset` is not set in the response, it means the append did not happen due to an error. If one request fails, all subsequent requests will also fail until a successful request is made again. + *

If the stream is of `PENDING` type, data will only be available for read operations after the stream is committed.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • appendRowsCallable() + *

+ *

GetWriteStream

Gets a write stream.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getWriteStream(GetWriteStreamRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getWriteStream(WriteStreamName name) + *

  • getWriteStream(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getWriteStreamCallable() + *

+ *

FinalizeWriteStream

Finalize a write stream so that no new data can be appended to the stream. Finalize is not supported on the '_default' stream.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • finalizeWriteStream(FinalizeWriteStreamRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • finalizeWriteStream(WriteStreamName name) + *

  • finalizeWriteStream(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • finalizeWriteStreamCallable() + *

+ *

BatchCommitWriteStreams

Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams must be finalized before commit and cannot be committed multiple times. Once a stream is committed, data in the stream becomes available for read operations.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • batchCommitWriteStreams(BatchCommitWriteStreamsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • batchCommitWriteStreams(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • batchCommitWriteStreamsCallable() + *

+ *

FlushRows

Flushes rows to a BUFFERED stream. If users are appending rows to a BUFFERED stream, a flush operation is required in order for the rows to become available for reading. A flush operation flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in the request. Flush is not supported on the _default stream, since it is not BUFFERED.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • flushRows(FlushRowsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • flushRows(WriteStreamName writeStream) + *

  • flushRows(String writeStream) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • flushRowsCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *
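Tying the table together, a sketch of the PENDING-stream lifecycle; bracketed names are placeholders as in the generated samples, and WriteStream.Type.PENDING plus the singular addWriteStreams(String) adder are assumed from the v1beta2 proto:

    try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
      TableName table = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
      // 1. Create an explicit PENDING stream; rows stay invisible until commit.
      WriteStream stream =
          bigQueryWriteClient.createWriteStream(
              table, WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build());
      // 2. Append rows over the bidirectional stream (see appendRowsCallable()).
      // 3. Finalize the stream so no further appends are accepted.
      FinalizeWriteStreamResponse finalized =
          bigQueryWriteClient.finalizeWriteStream(stream.getName());
      // 4. Atomically commit the finalized stream to make its rows readable.
      BatchCommitWriteStreamsResponse committed =
          bigQueryWriteClient.batchCommitWriteStreams(
              BatchCommitWriteStreamsRequest.newBuilder()
                  .setParent(table.toString())
                  .addWriteStreams(stream.getName())
                  .build());
    }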

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BigQueryWriteSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + * + * @deprecated This class is deprecated and will be removed in the next major version update. + */ +@BetaApi +@Deprecated +@Generated("by gapic-generator-java") +public class BigQueryWriteClient implements BackgroundResource { + private final BigQueryWriteSettings settings; + private final BigQueryWriteStub stub; + + /** Constructs an instance of BigQueryWriteClient with default settings. */ + public static final BigQueryWriteClient create() throws IOException { + return create(BigQueryWriteSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BigQueryWriteClient create(BigQueryWriteSettings settings) + throws IOException { + return new BigQueryWriteClient(settings); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given stub for making calls. This is + * for advanced usage - prefer using create(BigQueryWriteSettings). + */ + public static final BigQueryWriteClient create(BigQueryWriteStub stub) { + return new BigQueryWriteClient(stub); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BigQueryWriteClient(BigQueryWriteSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryWriteStubSettings) settings.getStubSettings()).createStub(); + } + + protected BigQueryWriteClient(BigQueryWriteStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BigQueryWriteSettings getSettings() { + return settings; + } + + public BigQueryWriteStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+   *   WriteStream writeStream = WriteStream.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which the stream belongs, in the format of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param writeStream Required. Stream to be created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final WriteStream createWriteStream(TableName parent, WriteStream writeStream) { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setWriteStream(writeStream) + .build(); + return createWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
+   *   WriteStream writeStream = WriteStream.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+   * }
+   * }
+ * + * @param parent Required. Reference to the table to which the stream belongs, in the format of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param writeStream Required. Stream to be created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final WriteStream createWriteStream(String parent, WriteStream writeStream) { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder().setParent(parent).setWriteStream(writeStream).build(); + return createWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request =
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setWriteStream(WriteStream.newBuilder().build())
+   *           .build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final WriteStream createWriteStream(CreateWriteStreamRequest request) { + return createWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request =
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+   *           .setWriteStream(WriteStream.newBuilder().build())
+   *           .build();
+   *   ApiFuture<WriteStream> future =
+   *       bigQueryWriteClient.createWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   WriteStream response = future.get();
+   * }
+   * }
+ * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final UnaryCallable createWriteStreamCallable() { + return stub.createWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Appends data to the given stream. + * + *

If `offset` is specified, the `offset` is checked against the end of the stream. The server + * returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset + * beyond the current end of the stream, or `ALREADY_EXISTS` if the user provides an `offset` + * that has already been written to. The user can retry with an adjusted offset within the same + * RPC stream. If `offset` is not specified, the append happens at the end of the stream. + * + *

The response contains the offset at which the append happened. Responses are received in the + * same order in which requests are sent. There will be one response for each successful request. + * If the `offset` is not set in the response, it means the append did not happen due to an + * error. If one request fails, all subsequent requests will also fail until a successful + * request is made again. + * + *

If the stream is of `PENDING` type, data will only be available for read operations after + * the stream is committed. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BidiStream<AppendRowsRequest, AppendRowsResponse> bidiStream =
+   *       bigQueryWriteClient.appendRowsCallable().call();
+   *   AppendRowsRequest request =
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .setTraceId("traceId-1067401920")
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (AppendRowsResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final BidiStreamingCallable appendRowsCallable() { + return stub.appendRowsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final WriteStream getWriteStream(WriteStreamName name) { + GetWriteStreamRequest request = + GetWriteStreamRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final WriteStream getWriteStream(String name) { + GetWriteStreamRequest request = GetWriteStreamRequest.newBuilder().setName(name).build(); + return getWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request =
+   *       GetWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final WriteStream getWriteStream(GetWriteStreamRequest request) { + return getWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a write stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request =
+   *       GetWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   ApiFuture<WriteStream> future =
+   *       bigQueryWriteClient.getWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   WriteStream response = future.get();
+   * }
+   * }
+ * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final UnaryCallable getWriteStreamCallable() { + return stub.getWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final FinalizeWriteStreamResponse finalizeWriteStream(WriteStreamName name) { + FinalizeWriteStreamRequest request = + FinalizeWriteStreamRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return finalizeWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
+   * }
+   * }
+ * + * @param name Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final FinalizeWriteStreamResponse finalizeWriteStream(String name) { + FinalizeWriteStreamRequest request = + FinalizeWriteStreamRequest.newBuilder().setName(name).build(); + return finalizeWriteStream(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request =
+   *       FinalizeWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final FinalizeWriteStreamResponse finalizeWriteStream(FinalizeWriteStreamRequest request) { + return finalizeWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request =
+   *       FinalizeWriteStreamRequest.newBuilder()
+   *           .setName(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .build();
+   *   ApiFuture<FinalizeWriteStreamResponse> future =
+   *       bigQueryWriteClient.finalizeWriteStreamCallable().futureCall(request);
+   *   // Do something.
+   *   FinalizeWriteStreamResponse response = future.get();
+   * }
+   * }
+ * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final UnaryCallable + finalizeWriteStreamCallable() { + return stub.finalizeWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams + * must be finalized before commit and cannot be committed multiple times. Once a stream is + * committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String parent = "parent-995424086";
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(parent);
+   * }
+   * }
+ * + * @param parent Required. Parent table that all the streams should belong to, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(String parent) { + BatchCommitWriteStreamsRequest request = + BatchCommitWriteStreamsRequest.newBuilder().setParent(parent).build(); + return batchCommitWriteStreams(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams + * must be finalized before commit and cannot be committed multiple times. Once a stream is + * committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request =
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent("parent-995424086")
+   *           .addAllWriteStreams(new ArrayList<String>())
+   *           .build();
+   *   BatchCommitWriteStreamsResponse response =
+   *       bigQueryWriteClient.batchCommitWriteStreams(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request) { + return batchCommitWriteStreamsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams + * must be finalized before commit and cannot be committed multiple times. Once a stream is + * committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request =
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent("parent-995424086")
+   *           .addAllWriteStreams(new ArrayList<String>())
+   *           .build();
+   *   ApiFuture<BatchCommitWriteStreamsResponse> future =
+   *       bigQueryWriteClient.batchCommitWriteStreamsCallable().futureCall(request);
+   *   // Do something.
+   *   BatchCommitWriteStreamsResponse response = future.get();
+   * }
+   * }
+ * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final UnaryCallable + batchCommitWriteStreamsCallable() { + return stub.batchCommitWriteStreamsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush + * operation is required in order for the rows to become available for reading. A Flush operation + * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   WriteStreamName writeStream =
+   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+   * }
+   * }
+ * + * @param writeStream Required. The stream that is the target of the flush operation. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final FlushRowsResponse flushRows(WriteStreamName writeStream) { + FlushRowsRequest request = + FlushRowsRequest.newBuilder() + .setWriteStream(writeStream == null ? null : writeStream.toString()) + .build(); + return flushRows(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush + * operation is required in order for the rows to become available for reading. A Flush operation + * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   String writeStream =
+   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
+   * }
+   * }
+ * + * @param writeStream Required. The stream that is the target of the flush operation. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final FlushRowsResponse flushRows(String writeStream) { + FlushRowsRequest request = FlushRowsRequest.newBuilder().setWriteStream(writeStream).build(); + return flushRows(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush + * operation is required in order for the rows to become available for reading. A Flush operation + * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .build();
+   *   FlushRowsResponse response = bigQueryWriteClient.flushRows(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final FlushRowsResponse flushRows(FlushRowsRequest request) { + return flushRowsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush + * operation is required in order for the rows to become available for reading. A Flush operation + * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FlushRowsRequest request =
+   *       FlushRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
+   *           .setOffset(Int64Value.newBuilder().build())
+   *           .build();
+   *   ApiFuture<FlushRowsResponse> future =
+   *       bigQueryWriteClient.flushRowsCallable().futureCall(request);
+   *   // Do something.
+   *   FlushRowsResponse response = future.get();
+   * }
+   * }
+ * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public final UnaryCallable flushRowsCallable() { + return stub.flushRowsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java new file mode 100644 index 000000000000..f26b59ffd7b1 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java @@ -0,0 +1,320 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryWriteClient}. + * + *
<p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createWriteStream:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder = BigQueryWriteSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * }</pre>
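+ *
+ * <p>To apply one retry policy across every unary method at once, rather than configuring each
+ * RPC individually, the builder exposes applyToAllUnaryMethods. A minimal sketch (the 120-second
+ * timeout is illustrative only):
+ *
+ * <pre>{@code
+ * BigQueryWriteSettings.Builder builder = BigQueryWriteSettings.newBuilder();
+ * builder.applyToAllUnaryMethods(
+ *     unaryCallSettings -> {
+ *       // Raise the total timeout for every unary call; streaming methods are unaffected.
+ *       unaryCallSettings.setRetrySettings(
+ *           unaryCallSettings
+ *               .getRetrySettings()
+ *               .toBuilder()
+ *               .setTotalTimeoutDuration(Duration.ofSeconds(120))
+ *               .build());
+ *       return null;
+ *     });
+ * BigQueryWriteSettings settings = builder.build();
+ * }</pre>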
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + * @deprecated This class is deprecated and will be removed in the next major version update. + */ +@BetaApi +@Deprecated +@Generated("by gapic-generator-java") +public class BigQueryWriteSettings extends ClientSettings { + + /** + * Returns the object with the settings used for calls to createWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings createWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).createWriteStreamSettings(); + } + + /** + * Returns the object with the settings used for calls to appendRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public StreamingCallSettings appendRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).appendRowsSettings(); + } + + /** + * Returns the object with the settings used for calls to getWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings getWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).getWriteStreamSettings(); + } + + /** + * Returns the object with the settings used for calls to finalizeWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings + finalizeWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).finalizeWriteStreamSettings(); + } + + /** + * Returns the object with the settings used for calls to batchCommitWriteStreams. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings(); + } + + /** + * Returns the object with the settings used for calls to flushRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings flushRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).flushRowsSettings(); + } + + public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub) + throws IOException { + return new BigQueryWriteSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryWriteStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryWriteStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryWriteStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryWriteStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryWriteStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryWriteStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryWriteStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BigQueryWriteSettings. */ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryWriteStubSettings.newBuilder(clientContext)); + } + + protected Builder(BigQueryWriteSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryWriteStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(BigQueryWriteStubSettings.newBuilder()); + } + + public BigQueryWriteStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryWriteStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** + * Returns the builder for the settings used for calls to createWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return getStubSettingsBuilder().createWriteStreamSettings(); + } + + /** + * Returns the builder for the settings used for calls to appendRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public StreamingCallSettings.Builder + appendRowsSettings() { + return getStubSettingsBuilder().appendRowsSettings(); + } + + /** + * Returns the builder for the settings used for calls to getWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getStubSettingsBuilder().getWriteStreamSettings(); + } + + /** + * Returns the builder for the settings used for calls to finalizeWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return getStubSettingsBuilder().finalizeWriteStreamSettings(); + } + + /** + * Returns the builder for the settings used for calls to batchCommitWriteStreams. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return getStubSettingsBuilder().batchCommitWriteStreamsSettings(); + } + + /** + * Returns the builder for the settings used for calls to flushRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder flushRowsSettings() { + return getStubSettingsBuilder().flushRowsSettings(); + } + + @Override + public BigQueryWriteSettings build() throws IOException { + return new BigQueryWriteSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java new file mode 100644 index 000000000000..58bc7ec5fb08 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java @@ -0,0 +1,359 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toJavaTimeLocalDateTime; +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toJavaTimeLocalTime; +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toThreetenLocalDateTime; +import static com.google.cloud.bigquery.storage.util.TimeConversionUtils.toThreetenLocalTime; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.core.ObsoleteApi; + +/** + * Ported from ZetaSQL CivilTimeEncoder Original code can be found at: + * https://github.com/google/zetasql/blob/master/java/com/google/zetasql/CivilTimeEncoder.java + * Encoder for TIME and DATETIME values, according to civil_time encoding. + * + *
<p>The valid range and number of bits required by each date/time field is as the following:
+ *
+ * <table>
+ *   <caption>Range and bits for date/time fields</caption>
+ *   <tr><th>Field</th>  <th>Range</th>             <th>#Bits</th></tr>
+ *   <tr><td>Year</td>   <td>[1, 9999]</td>         <td>14</td></tr>
+ *   <tr><td>Month</td>  <td>[1, 12]</td>           <td>4</td></tr>
+ *   <tr><td>Day</td>    <td>[1, 31]</td>           <td>5</td></tr>
+ *   <tr><td>Hour</td>   <td>[0, 23]</td>           <td>5</td></tr>
+ *   <tr><td>Minute</td> <td>[0, 59]</td>           <td>6</td></tr>
+ *   <tr><td>Second</td> <td>[0, 59]*</td>          <td>6</td></tr>
+ *   <tr><td>Micros</td> <td>[0, 999999]</td>       <td>20</td></tr>
+ *   <tr><td>Nanos</td>  <td>[0, 999999999]</td>    <td>30</td></tr>
+ * </table>
+ *
+ * <p>* Leap second is not supported.
+ *
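<p>Summing the widths, a TIME value with microseconds needs 5 + 6 + 6 + 20 = 37 bits and a
+ * DATETIME value with microseconds needs 14 + 4 + 5 + 5 + 6 + 6 + 20 = 60 bits, so both packed
+ * encodings fit in a signed 64-bit integer.
+ *
+ * <p>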
When encoding the TIME or DATETIME into a bit field, larger date/time field is on the more + * significant side. + */ +@Deprecated +public final class CivilTimeEncoder { + private static final int NANO_LENGTH = 30; + private static final int MICRO_LENGTH = 20; + + private static final int NANO_SHIFT = 0; + private static final int MICRO_SHIFT = 0; + private static final int SECOND_SHIFT = 0; + private static final int MINUTE_SHIFT = 6; + private static final int HOUR_SHIFT = 12; + private static final int DAY_SHIFT = 17; + private static final int MONTH_SHIFT = 22; + private static final int YEAR_SHIFT = 26; + + private static final long NANO_MASK = 0x3FFFFFFFL; + private static final long MICRO_MASK = 0xFFFFFL; + private static final long SECOND_MASK = 0x3FL; + private static final long MINUTE_MASK = 0xFC0L; + private static final long HOUR_MASK = 0x1F000L; + private static final long DAY_MASK = 0x3E0000L; + private static final long MONTH_MASK = 0x3C00000L; + private static final long YEAR_MASK = 0xFFFC000000L; + + private static final long TIME_SECONDS_MASK = 0x1FFFFL; + private static final long TIME_MICROS_MASK = 0x1FFFFFFFFFL; + private static final long TIME_NANOS_MASK = 0x7FFFFFFFFFFFL; + private static final long DATETIME_SECONDS_MASK = 0xFFFFFFFFFFL; + private static final long DATETIME_MICROS_MASK = 0xFFFFFFFFFFFFFFFL; + + /** + * Encodes {@code time} as a 4-byte integer with seconds precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *      3         2         1
+   * MSB 10987654321098765432109876543210 LSB
+   *                    | H ||  M ||  S |
+   * </pre>
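+   *
+   * <p>Worked example (using the HOUR_SHIFT/MINUTE_SHIFT/SECOND_SHIFT constants above): 13:14:15
+   * packs to {@code (13 << 12) | (14 << 6) | 15}, i.e. {@code 0xD38F}.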
+ * + * @see #decodePacked32TimeSeconds(int) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static int encodePacked32TimeSeconds(java.time.LocalTime time) { + checkValidTimeSeconds(time); + int bitFieldTimeSeconds = 0x0; + bitFieldTimeSeconds |= time.getHour() << HOUR_SHIFT; + bitFieldTimeSeconds |= time.getMinute() << MINUTE_SHIFT; + bitFieldTimeSeconds |= time.getSecond() << SECOND_SHIFT; + return bitFieldTimeSeconds; + } + + /** + * Decodes {@code bitFieldTimeSeconds} as a {@link LocalTime} with seconds precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *      3         2         1
+   * MSB 10987654321098765432109876543210 LSB
+   *                    | H ||  M ||  S |
+   * </pre>
+ * + * @see #encodePacked32TimeSeconds(LocalTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static java.time.LocalTime decodePacked32TimeSeconds(int bitFieldTimeSeconds) { + checkValidBitField(bitFieldTimeSeconds, TIME_SECONDS_MASK); + int hourOfDay = getFieldFromBitField(bitFieldTimeSeconds, HOUR_MASK, HOUR_SHIFT); + int minuteOfHour = getFieldFromBitField(bitFieldTimeSeconds, MINUTE_MASK, MINUTE_SHIFT); + int secondOfMinute = getFieldFromBitField(bitFieldTimeSeconds, SECOND_MASK, SECOND_SHIFT); + // LocalTime validates the input parameters. + try { + return java.time.LocalTime.of(hourOfDay, minuteOfHour, secondOfMinute); + } catch (java.time.DateTimeException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } + + /** + * This method is obsolete. Use {@link #encodePacked64TimeMicrosLocalTime(java.time.LocalTime)} + * instead. + */ + @ObsoleteApi("Use encodePacked64TimeMicrosLocalTime(java.time.LocalTime) instead") + public static long encodePacked64TimeMicros(org.threeten.bp.LocalTime time) { + return encodePacked64TimeMicrosLocalTime(toJavaTimeLocalTime(time)); + } + + /** + * Encodes {@code time} as a 8-byte integer with microseconds precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                                | H ||  M ||  S ||-------micros-----|
+   * </pre>
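+   *
+   * <p>Worked example: {@code LocalTime.of(13, 14, 15, 16_000)} (13:14:15.000016) packs to
+   * {@code (0xD38F << 20) | 16}, since 16,000 nanoseconds is 16 microseconds.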
+ * + * @see #decodePacked64TimeMicros(long) + * @see #encodePacked64TimeMicros(LocalTime) + */ + @SuppressWarnings("GoodTime") + public static long encodePacked64TimeMicrosLocalTime(java.time.LocalTime time) { + checkValidTimeMicros(time); + return (((long) encodePacked32TimeSeconds(time)) << MICRO_LENGTH) | (time.getNano() / 1_000L); + } + + /** This method is obsolete. Use {@link #decodePacked64TimeMicrosLocalTime(long)} instead. */ + @ObsoleteApi("Use decodePacked64TimeMicrosLocalTime(long) instead") + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static org.threeten.bp.LocalTime decodePacked64TimeMicros(long bitFieldTimeMicros) { + return toThreetenLocalTime(decodePacked64TimeMicrosLocalTime(bitFieldTimeMicros)); + } + + /** + * Decodes {@code bitFieldTimeMicros} as a {@link java.time.LocalTime} with microseconds + * precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                                | H ||  M ||  S ||-------micros-----|
+   * </pre>
+ * + * @see #encodePacked64TimeMicros(LocalTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static java.time.LocalTime decodePacked64TimeMicrosLocalTime(long bitFieldTimeMicros) { + checkValidBitField(bitFieldTimeMicros, TIME_MICROS_MASK); + int bitFieldTimeSeconds = (int) (bitFieldTimeMicros >> MICRO_LENGTH); + java.time.LocalTime timeSeconds = decodePacked32TimeSeconds(bitFieldTimeSeconds); + int microOfSecond = getFieldFromBitField(bitFieldTimeMicros, MICRO_MASK, MICRO_SHIFT); + checkValidMicroOfSecond(microOfSecond); + java.time.LocalTime time = timeSeconds.withNano(microOfSecond * 1000); + checkValidTimeMicros(time); + return time; + } + + /** + * Encodes {@code dateTime} as a 8-byte integer with seconds precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                             |--- year ---||m || D || H ||  M ||  S |
+   * </pre>
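+   *
+   * <p>Worked example: 2025-01-02T13:14:15 packs to
+   * {@code (2025L << 26) | (1L << 22) | (2L << 17) | 0xD38F}.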
+ * + * @see #decodePacked64DatetimeSeconds(long) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static long encodePacked64DatetimeSeconds(java.time.LocalDateTime dateTime) { + checkValidDateTimeSeconds(dateTime); + long bitFieldDatetimeSeconds = 0x0L; + bitFieldDatetimeSeconds |= (long) dateTime.getYear() << YEAR_SHIFT; + bitFieldDatetimeSeconds |= (long) dateTime.getMonthValue() << MONTH_SHIFT; + bitFieldDatetimeSeconds |= (long) dateTime.getDayOfMonth() << DAY_SHIFT; + bitFieldDatetimeSeconds |= (long) encodePacked32TimeSeconds(dateTime.toLocalTime()); + return bitFieldDatetimeSeconds; + } + + /** + * Decodes {@code bitFieldDatetimeSeconds} as a {@link LocalDateTime} with seconds precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *                             |--- year ---||m || D || H ||  M ||  S |
+   * </pre>
+ * + * @see #encodePacked64DatetimeSeconds(LocalDateTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + private static java.time.LocalDateTime decodePacked64DatetimeSeconds( + long bitFieldDatetimeSeconds) { + checkValidBitField(bitFieldDatetimeSeconds, DATETIME_SECONDS_MASK); + int bitFieldTimeSeconds = (int) (bitFieldDatetimeSeconds & TIME_SECONDS_MASK); + java.time.LocalTime timeSeconds = decodePacked32TimeSeconds(bitFieldTimeSeconds); + int year = getFieldFromBitField(bitFieldDatetimeSeconds, YEAR_MASK, YEAR_SHIFT); + int monthOfYear = getFieldFromBitField(bitFieldDatetimeSeconds, MONTH_MASK, MONTH_SHIFT); + int dayOfMonth = getFieldFromBitField(bitFieldDatetimeSeconds, DAY_MASK, DAY_SHIFT); + try { + java.time.LocalDateTime dateTime = + java.time.LocalDateTime.of( + year, + monthOfYear, + dayOfMonth, + timeSeconds.getHour(), + timeSeconds.getMinute(), + timeSeconds.getSecond()); + checkValidDateTimeSeconds(dateTime); + return dateTime; + } catch (java.time.DateTimeException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } + + /** + * This method is obsolete. Use {@link + * #encodePacked64DatetimeMicrosLocalDateTime(java.time.LocalDateTime)} instead. + */ + @ObsoleteApi("Use encodePacked64DatetimeMicrosLocalDateTime(java.time.LocalDateTime) instead") + @SuppressWarnings({"GoodTime-ApiWithNumericTimeUnit", "JavaLocalDateTimeGetNano"}) + public static long encodePacked64DatetimeMicros(org.threeten.bp.LocalDateTime dateTime) { + return encodePacked64DatetimeMicrosLocalDateTime(toJavaTimeLocalDateTime(dateTime)); + } + + /** + * Encodes {@code dateTime} as a 8-byte integer with microseconds precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *         |--- year ---||m || D || H ||  M ||  S ||-------micros-----|
+   * </pre>
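+   *
+   * <p>Worked example: 2025-01-02T13:14:15.000016 is the seconds encoding shifted left by 20
+   * bits, OR-ed with the microsecond count:
+   * {@code ((2025L << 26 | 1L << 22 | 2L << 17 | 0xD38F) << 20) | 16}.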
+ * + * @see #decodePacked64DatetimeMicros(long) + */ + @SuppressWarnings({"GoodTime-ApiWithNumericTimeUnit", "JavaLocalDateTimeGetNano"}) + public static long encodePacked64DatetimeMicrosLocalDateTime(java.time.LocalDateTime dateTime) { + checkValidDateTimeMicros(dateTime); + return (encodePacked64DatetimeSeconds(dateTime) << MICRO_LENGTH) + | (dateTime.getNano() / 1_000L); + } + + /** + * This method is obsolete. Use {@link #decodePacked64DatetimeMicrosLocalDateTime(long)} instead. + */ + @ObsoleteApi("Use decodePacked64DatetimeMicrosLocalDateTime(long) instead") + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static org.threeten.bp.LocalDateTime decodePacked64DatetimeMicros( + long bitFieldDatetimeMicros) { + return toThreetenLocalDateTime( + decodePacked64DatetimeMicrosLocalDateTime(bitFieldDatetimeMicros)); + } + + /** + * Decodes {@code bitFieldDatetimeMicros} as a {@link java.time.LocalDateTime} with microseconds + * precision. + * + *
<p>Encoding is as the following:
+   *
+   * <pre>
+   *        6         5         4         3         2         1
+   * MSB 3210987654321098765432109876543210987654321098765432109876543210 LSB
+   *         |--- year ---||m || D || H ||  M ||  S ||-------micros-----|
+   * </pre>
+ * + * @see #encodePacked64DatetimeMicros(LocalDateTime) + */ + @SuppressWarnings("GoodTime-ApiWithNumericTimeUnit") + public static java.time.LocalDateTime decodePacked64DatetimeMicrosLocalDateTime( + long bitFieldDatetimeMicros) { + checkValidBitField(bitFieldDatetimeMicros, DATETIME_MICROS_MASK); + long bitFieldDatetimeSeconds = bitFieldDatetimeMicros >> MICRO_LENGTH; + java.time.LocalDateTime dateTimeSeconds = + decodePacked64DatetimeSeconds(bitFieldDatetimeSeconds); + int microOfSecond = getFieldFromBitField(bitFieldDatetimeMicros, MICRO_MASK, MICRO_SHIFT); + checkValidMicroOfSecond(microOfSecond); + java.time.LocalDateTime dateTime = dateTimeSeconds.withNano(microOfSecond * 1_000); + checkValidDateTimeMicros(dateTime); + return dateTime; + } + + private static int getFieldFromBitField(long bitField, long mask, int shift) { + return (int) ((bitField & mask) >> shift); + } + + private static void checkValidTimeSeconds(java.time.LocalTime time) { + checkArgument(time.getHour() >= 0 && time.getHour() <= 23); + checkArgument(time.getMinute() >= 0 && time.getMinute() <= 59); + checkArgument(time.getSecond() >= 0 && time.getSecond() <= 59); + } + + private static void checkValidDateTimeSeconds(java.time.LocalDateTime dateTime) { + checkArgument(dateTime.getYear() >= 1 && dateTime.getYear() <= 9999); + checkArgument(dateTime.getMonthValue() >= 1 && dateTime.getMonthValue() <= 12); + checkArgument(dateTime.getDayOfMonth() >= 1 && dateTime.getDayOfMonth() <= 31); + checkValidTimeSeconds(dateTime.toLocalTime()); + } + + private static void checkValidTimeMicros(java.time.LocalTime time) { + checkValidTimeSeconds(time); + checkArgument(time.equals(time.truncatedTo(java.time.temporal.ChronoUnit.MICROS))); + } + + private static void checkValidDateTimeMicros(java.time.LocalDateTime dateTime) { + checkValidDateTimeSeconds(dateTime); + checkArgument(dateTime.equals(dateTime.truncatedTo(java.time.temporal.ChronoUnit.MICROS))); + } + + private static void checkValidMicroOfSecond(int microOfSecond) { + checkArgument(microOfSecond >= 0 && microOfSecond <= 999999); + } + + private static void checkValidBitField(long bitField, long mask) { + checkArgument((bitField & ~mask) == 0x0L); + } + + private CivilTimeEncoder() {} +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java new file mode 100644 index 000000000000..2fe44eee2509 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java @@ -0,0 +1,343 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.common.base.Preconditions; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Message; +import java.io.IOException; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.annotation.Nullable; +import org.json.JSONArray; +import org.json.JSONObject; + +/** + * A StreamWriter that can write JSON data (JSONObjects) to BigQuery tables. The JsonStreamWriter is + * built on top of a StreamWriter, and it simply converts all JSON data to protobuf messages then + * calls StreamWriter's append() method to write to BigQuery tables. + * + *
<p>
This client lib is deprecated, please use v1 instead. + */ +@Deprecated +public class JsonStreamWriter implements AutoCloseable { + private static String streamPatternString = + "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+"; + private static Pattern streamPattern = Pattern.compile(streamPatternString); + private static final Logger LOG = Logger.getLogger(JsonStreamWriter.class.getName()); + + private BigQueryWriteClient client; + private String streamName; + private StreamWriterV2 streamWriter; + private StreamWriterV2.Builder streamWriterBuilder; + private Descriptor descriptor; + private TableSchema tableSchema; + + /** + * Constructs the JsonStreamWriter + * + * @param builder The Builder object for the JsonStreamWriter + */ + private JsonStreamWriter(Builder builder) + throws Descriptors.DescriptorValidationException, + IllegalArgumentException, + IOException, + InterruptedException { + this.client = builder.client; + this.descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(builder.tableSchema); + + if (this.client == null) { + streamWriterBuilder = StreamWriterV2.newBuilder(builder.streamName); + } else { + streamWriterBuilder = StreamWriterV2.newBuilder(builder.streamName, builder.client); + } + streamWriterBuilder.setWriterSchema(ProtoSchemaConverter.convert(this.descriptor)); + setStreamWriterSettings( + builder.channelProvider, + builder.credentialsProvider, + builder.endpoint, + builder.flowControlSettings, + builder.traceId); + this.streamWriter = streamWriterBuilder.build(); + this.streamName = builder.streamName; + this.tableSchema = builder.tableSchema; + } + + /** + * Writes a JSONArray that contains JSONObjects to the BigQuery table by first converting the JSON + * data to protobuf messages, then using StreamWriter's append() to write the data. + * + * @param jsonArr The JSON array that contains JSONObjects to be written + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture + */ + public ApiFuture append(JSONArray jsonArr) { + return append(jsonArr, -1); + } + + /** + * Writes a JSONArray that contains JSONObjects to the BigQuery table by first converting the JSON + * data to protobuf messages, then using StreamWriter's append() to write the data. + * + * @param jsonArr The JSON array that contains JSONObjects to be written + * @param offset Offset for deduplication + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture + */ + public ApiFuture append(JSONArray jsonArr, long offset) { + ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); + // Any error in convertJsonToProtoMessage will throw an + // IllegalArgumentException/IllegalStateException/NullPointerException and will halt processing + // of JSON data. 
+ for (int i = 0; i < jsonArr.length(); i++) { + JSONObject json = jsonArr.getJSONObject(i); + Message protoMessage = + JsonToProtoMessage.convertJsonToProtoMessage(this.descriptor, this.tableSchema, json); + rowsBuilder.addSerializedRows(protoMessage.toByteString()); + } + // Need to make sure refreshAppendAndSetDescriptor finish first before this can run + synchronized (this) { + final ApiFuture appendResponseFuture = + this.streamWriter.append(rowsBuilder.build(), offset); + return appendResponseFuture; + } + } + + /** + * Gets streamName + * + * @return String + */ + public String getStreamName() { + return this.streamName; + } + + /** + * Gets current descriptor + * + * @return Descriptor + */ + public Descriptor getDescriptor() { + return this.descriptor; + } + + /** Sets all StreamWriter settings. */ + private void setStreamWriterSettings( + @Nullable TransportChannelProvider channelProvider, + @Nullable CredentialsProvider credentialsProvider, + @Nullable String endpoint, + @Nullable FlowControlSettings flowControlSettings, + @Nullable String traceId) { + if (channelProvider != null) { + streamWriterBuilder.setChannelProvider(channelProvider); + } + if (credentialsProvider != null) { + streamWriterBuilder.setCredentialsProvider(credentialsProvider); + } + if (endpoint != null) { + streamWriterBuilder.setEndpoint(endpoint); + } + if (traceId != null) { + streamWriterBuilder.setTraceId("JsonWriterBeta_" + traceId); + } else { + streamWriterBuilder.setTraceId("JsonWriterBeta:null"); + } + if (flowControlSettings != null) { + if (flowControlSettings.getMaxOutstandingRequestBytes() != null) { + streamWriterBuilder.setMaxInflightBytes( + flowControlSettings.getMaxOutstandingRequestBytes()); + } + if (flowControlSettings.getMaxOutstandingElementCount() != null) { + streamWriterBuilder.setMaxInflightRequests( + flowControlSettings.getMaxOutstandingElementCount()); + } + } + } + + /** + * newBuilder that constructs a JsonStreamWriter builder with BigQuery client being initialized by + * StreamWriter by default. + * + * @param streamOrTableName name of the stream that must follow + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" or table name + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+" + * @param tableSchema The schema of the table when the stream was created, which is passed back + * through {@code WriteStream} + * @return Builder + */ + public static Builder newBuilder(String streamOrTableName, TableSchema tableSchema) { + Preconditions.checkNotNull(streamOrTableName, "StreamOrTableName is null."); + Preconditions.checkNotNull(tableSchema, "TableSchema is null."); + return new Builder(streamOrTableName, tableSchema, null); + } + + /** + * newBuilder that constructs a JsonStreamWriter builder. + * + * @param streamOrTableName name of the stream that must follow + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" + * @param tableSchema The schema of the table when the stream was created, which is passed back + * through {@code WriteStream} + * @param client + * @return Builder + */ + public static Builder newBuilder( + String streamOrTableName, TableSchema tableSchema, BigQueryWriteClient client) { + Preconditions.checkNotNull(streamOrTableName, "StreamName is null."); + Preconditions.checkNotNull(tableSchema, "TableSchema is null."); + Preconditions.checkNotNull(client, "BigQuery client is null."); + return new Builder(streamOrTableName, tableSchema, client); + } + + /** Closes the underlying StreamWriter. 
*/ + @Override + public void close() { + this.streamWriter.close(); + } + + public static final class Builder { + private String streamName; + private BigQueryWriteClient client; + private TableSchema tableSchema; + + private TransportChannelProvider channelProvider; + private CredentialsProvider credentialsProvider; + private FlowControlSettings flowControlSettings; + private String endpoint; + private boolean createDefaultStream = false; + private String traceId; + + private static String streamPatternString = + "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)/streams/[^/]+"; + private static String tablePatternString = "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)"; + + private static Pattern streamPattern = Pattern.compile(streamPatternString); + private static Pattern tablePattern = Pattern.compile(tablePatternString); + + /** + * Constructor for JsonStreamWriter's Builder + * + * @param streamOrTableName name of the stream that must follow + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" or + * "projects/[^/]+/datasets/[^/]+/tables/[^/]+" + * @param tableSchema schema used to convert Json to proto messages. + * @param client + */ + private Builder(String streamOrTableName, TableSchema tableSchema, BigQueryWriteClient client) { + Matcher streamMatcher = streamPattern.matcher(streamOrTableName); + if (!streamMatcher.matches()) { + Matcher tableMatcher = tablePattern.matcher(streamOrTableName); + if (!tableMatcher.matches()) { + throw new IllegalArgumentException("Invalid name: " + streamOrTableName); + } else { + this.streamName = streamOrTableName + "/_default"; + } + } else { + this.streamName = streamOrTableName; + } + this.tableSchema = tableSchema; + this.client = client; + } + + /** + * Setter for the underlying StreamWriter's TransportChannelProvider. + * + * @param channelProvider + * @return Builder + */ + public Builder setChannelProvider(TransportChannelProvider channelProvider) { + this.channelProvider = + Preconditions.checkNotNull(channelProvider, "ChannelProvider is null."); + return this; + } + + /** + * Setter for the underlying StreamWriter's CredentialsProvider. + * + * @param credentialsProvider + * @return Builder + */ + public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) { + this.credentialsProvider = + Preconditions.checkNotNull(credentialsProvider, "CredentialsProvider is null."); + return this; + } + + /** + * Setter for the underlying StreamWriter's FlowControlSettings. + * + * @param flowControlSettings + * @return Builder + */ + public Builder setFlowControlSettings(FlowControlSettings flowControlSettings) { + Preconditions.checkNotNull(flowControlSettings, "FlowControlSettings is null."); + this.flowControlSettings = + Preconditions.checkNotNull(flowControlSettings, "FlowControlSettings is null."); + return this; + } + + /** + * Stream name on the builder. + * + * @return Builder + */ + public String getStreamName() { + return streamName; + } + + /** + * Setter for the underlying StreamWriter's Endpoint. + * + * @param endpoint + * @return Builder + */ + public Builder setEndpoint(String endpoint) { + this.endpoint = Preconditions.checkNotNull(endpoint, "Endpoint is null."); + return this; + } + + /** + * Setter for a traceId to help identify traffic origin. 
+ * + * @param traceId + * @return Builder + */ + public Builder setTraceId(String traceId) { + this.traceId = Preconditions.checkNotNull(traceId, "TraceId is null."); + return this; + } + + /** + * Builds JsonStreamWriter + * + * @return JsonStreamWriter + */ + public JsonStreamWriter build() + throws Descriptors.DescriptorValidationException, + IllegalArgumentException, + IOException, + InterruptedException { + return new JsonStreamWriter(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonToProtoMessage.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonToProtoMessage.java new file mode 100644 index 000000000000..ac8fffdf7de7 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonToProtoMessage.java @@ -0,0 +1,536 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.pathtemplate.ValidationException; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.FieldDescriptor; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Message; +import com.google.protobuf.UninitializedMessageException; +import java.math.BigDecimal; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.List; +import java.util.logging.Logger; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * Converts Json data to protocol buffer messages given the protocol buffer descriptor. The protobuf + * descriptor must have all fields lowercased. + * + *
<p>
This client lib is deprecated, please use v1 instead. + */ +@Deprecated +public class JsonToProtoMessage { + private static final Logger LOG = Logger.getLogger(JsonToProtoMessage.class.getName()); + private static ImmutableMap FieldTypeToDebugMessage = + new ImmutableMap.Builder() + .put(FieldDescriptor.Type.BOOL, "boolean") + .put(FieldDescriptor.Type.BYTES, "bytes") + .put(FieldDescriptor.Type.INT32, "int32") + .put(FieldDescriptor.Type.DOUBLE, "double") + .put(FieldDescriptor.Type.INT64, "int64") + .put(FieldDescriptor.Type.STRING, "string") + .put(FieldDescriptor.Type.MESSAGE, "object") + .build(); + + /** + * Converts Json data to protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param json + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + public static DynamicMessage convertJsonToProtoMessage(Descriptor protoSchema, JSONObject json) + throws IllegalArgumentException { + Preconditions.checkNotNull(json, "JSONObject is null."); + Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); + Preconditions.checkState(json.length() != 0, "JSONObject is empty."); + + return convertJsonToProtoMessageImpl(protoSchema, null, json, "root", /* topLevel= */ true); + } + + /** + * Converts Json data to protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, + * NUMERIC, BIGNUMERIC + * @param json + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + public static DynamicMessage convertJsonToProtoMessage( + Descriptor protoSchema, TableSchema tableSchema, JSONObject json) + throws IllegalArgumentException { + Preconditions.checkNotNull(json, "JSONObject is null."); + Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); + Preconditions.checkNotNull(tableSchema, "TableSchema is null."); + Preconditions.checkState(json.length() != 0, "JSONObject is empty."); + + return convertJsonToProtoMessageImpl( + protoSchema, tableSchema.getFieldsList(), json, "root", /* topLevel= */ true); + } + + /** + * Converts Json data to protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param json + * @param jsonScope Debugging purposes + * @param topLevel checks if root level has any matching fields. + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + private static DynamicMessage convertJsonToProtoMessageImpl( + Descriptor protoSchema, + List tableSchema, + JSONObject json, + String jsonScope, + boolean topLevel) + throws IllegalArgumentException { + DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema); + String[] jsonNames = JSONObject.getNames(json); + if (jsonNames == null) { + return protoMsg.build(); + } + for (int i = 0; i < jsonNames.length; i++) { + String jsonName = jsonNames[i]; + // We want lowercase here to support case-insensitive data writes. + // The protobuf descriptor that is used is assumed to have all lowercased fields + String jsonLowercaseName = jsonName.toLowerCase(); + String currentScope = jsonScope + "." 
+ jsonName; + FieldDescriptor field = protoSchema.findFieldByName(jsonLowercaseName); + if (field == null) { + throw new IllegalArgumentException( + String.format("JSONObject has fields unknown to BigQuery: %s.", currentScope)); + } + TableFieldSchema fieldSchema = null; + if (tableSchema != null) { + // protoSchema is generated from tableSchema so their field ordering should match. + fieldSchema = tableSchema.get(field.getIndex()); + if (!fieldSchema.getName().toLowerCase().equals(field.getName())) { + throw new ValidationException( + "Field at index " + + field.getIndex() + + " has mismatch names (" + + fieldSchema.getName() + + ") (" + + field.getName() + + ")"); + } + } + if (!field.isRepeated()) { + fillField(protoMsg, field, fieldSchema, json, jsonName, currentScope); + } else { + fillRepeatedField(protoMsg, field, fieldSchema, json, jsonName, currentScope); + } + } + + DynamicMessage msg; + try { + msg = protoMsg.build(); + } catch (UninitializedMessageException e) { + String errorMsg = e.getMessage(); + int idxOfColon = errorMsg.indexOf(":"); + String missingFieldName = errorMsg.substring(idxOfColon + 2); + throw new IllegalArgumentException( + String.format( + "JSONObject does not have the required field %s.%s.", jsonScope, missingFieldName)); + } + if (topLevel && msg.getSerializedSize() == 0) { + throw new IllegalArgumentException("The created protobuf message is empty."); + } + return msg; + } + + /** + * Fills a non-repetaed protoField with the json data. + * + * @param protoMsg The protocol buffer message being constructed + * @param fieldDescriptor + * @param json + * @param exactJsonKeyName Exact key name in JSONObject instead of lowercased version + * @param currentScope Debugging purposes + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. 
+ */ + private static void fillField( + DynamicMessage.Builder protoMsg, + FieldDescriptor fieldDescriptor, + TableFieldSchema fieldSchema, + JSONObject json, + String exactJsonKeyName, + String currentScope) + throws IllegalArgumentException { + + java.lang.Object val = json.get(exactJsonKeyName); + if (val == JSONObject.NULL) { + return; + } + switch (fieldDescriptor.getType()) { + case BOOL: + if (val instanceof Boolean) { + protoMsg.setField(fieldDescriptor, (Boolean) val); + return; + } + break; + case BYTES: + if (fieldSchema != null) { + if (fieldSchema.getType() == TableFieldSchema.Type.NUMERIC) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal((String) val))); + return; + } else if (val instanceof Long || val instanceof Integer) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).longValue()))); + return; + } else if (val instanceof Float || val instanceof Double) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).doubleValue()))); + return; + } + } else if (fieldSchema.getType() == TableFieldSchema.Type.BIGNUMERIC) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal((String) val))); + return; + } else if (val instanceof Long || val instanceof Integer) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).longValue()))); + return; + } else if (val instanceof Float || val instanceof Double) { + protoMsg.setField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).doubleValue()))); + return; + } + } + } + if (val instanceof ByteString) { + protoMsg.setField(fieldDescriptor, ((ByteString) val).toByteArray()); + return; + } else if (val instanceof JSONArray) { + try { + byte[] bytes = new byte[((JSONArray) val).length()]; + for (int j = 0; j < ((JSONArray) val).length(); j++) { + bytes[j] = (byte) ((JSONArray) val).getInt(j); + if (bytes[j] != ((JSONArray) val).getInt(j)) { + throw new IllegalArgumentException( + String.format( + "Error: " + + currentScope + + "[" + + j + + "] could not be converted to byte[].")); + } + } + protoMsg.setField(fieldDescriptor, bytes); + return; + } catch (JSONException e) { + throw new IllegalArgumentException( + String.format("Error: " + currentScope + "could not be converted to byte[].")); + } + } + break; + case INT64: + if (fieldSchema != null) { + if (fieldSchema.getType() == TableFieldSchema.Type.DATETIME) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.parse((String) val))); + return; + } else if (val instanceof Long) { + protoMsg.setField(fieldDescriptor, (Long) val); + return; + } + } else if (fieldSchema.getType() == TableFieldSchema.Type.TIME) { + if (val instanceof String) { + protoMsg.setField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime( + LocalTime.parse((String) val))); + return; + } else if (val instanceof Long) { + protoMsg.setField(fieldDescriptor, (Long) val); + return; + } + } + } + if (val instanceof Integer) { + protoMsg.setField(fieldDescriptor, new Long((Integer) val)); + return; + } else if (val instanceof Long) { + 
protoMsg.setField(fieldDescriptor, (Long) val); + return; + } + break; + case INT32: + if (val instanceof Integer) { + protoMsg.setField(fieldDescriptor, (Integer) val); + return; + } + break; + case STRING: + if (val instanceof String) { + protoMsg.setField(fieldDescriptor, (String) val); + return; + } + break; + case DOUBLE: + if (val instanceof Double) { + protoMsg.setField(fieldDescriptor, (Double) val); + return; + } else if (val instanceof Float) { + protoMsg.setField(fieldDescriptor, new Double((Float) val)); + return; + } + break; + case MESSAGE: + if (val instanceof JSONObject) { + Message.Builder message = protoMsg.newBuilderForField(fieldDescriptor); + protoMsg.setField( + fieldDescriptor, + convertJsonToProtoMessageImpl( + fieldDescriptor.getMessageType(), + fieldSchema == null ? null : fieldSchema.getFieldsList(), + json.getJSONObject(exactJsonKeyName), + currentScope, + /* topLevel= */ false)); + return; + } + break; + } + throw new IllegalArgumentException( + String.format( + "JSONObject does not have a %s field at %s.", + FieldTypeToDebugMessage.get(fieldDescriptor.getType()), currentScope)); + } + + /** + * Fills a repeated protoField with the json data. + * + * @param protoMsg The protocol buffer message being constructed + * @param fieldDescriptor + * @param json If root level has no matching fields, throws exception. + * @param exactJsonKeyName Exact key name in JSONObject instead of lowercased version + * @param currentScope Debugging purposes + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + private static void fillRepeatedField( + DynamicMessage.Builder protoMsg, + FieldDescriptor fieldDescriptor, + TableFieldSchema fieldSchema, + JSONObject json, + String exactJsonKeyName, + String currentScope) + throws IllegalArgumentException { + + JSONArray jsonArray; + try { + jsonArray = json.getJSONArray(exactJsonKeyName); + } catch (JSONException e) { + throw new IllegalArgumentException( + "JSONObject does not have a array field at " + currentScope + "."); + } + java.lang.Object val; + int index; + boolean fail = false; + for (int i = 0; i < jsonArray.length(); i++) { + val = jsonArray.get(i); + index = i; + switch (fieldDescriptor.getType()) { + case BOOL: + if (val instanceof Boolean) { + protoMsg.addRepeatedField(fieldDescriptor, (Boolean) val); + } else { + fail = true; + } + break; + case BYTES: + Boolean added = false; + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.NUMERIC) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal((String) val))); + added = true; + } else if (val instanceof Long || val instanceof Integer) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).longValue()))); + added = true; + } else if (val instanceof Float || val instanceof Double) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).doubleValue()))); + added = true; + } + } else if (fieldSchema != null + && fieldSchema.getType() == TableFieldSchema.Type.BIGNUMERIC) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal((String) val))); + added = true; + } else if (val instanceof Long || val instanceof Integer) { + protoMsg.addRepeatedField( + 
fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).longValue()))); + added = true; + } else if (val instanceof Float || val instanceof Double) { + protoMsg.addRepeatedField( + fieldDescriptor, + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal(((Number) val).doubleValue()))); + added = true; + } + } + if (!added) { + if (val instanceof JSONArray) { + try { + byte[] bytes = new byte[((JSONArray) val).length()]; + for (int j = 0; j < ((JSONArray) val).length(); j++) { + bytes[j] = (byte) ((JSONArray) val).getInt(j); + if (bytes[j] != ((JSONArray) val).getInt(j)) { + throw new IllegalArgumentException( + String.format( + "Error: " + + currentScope + + "[" + + index + + "] could not be converted to byte[].")); + } + } + protoMsg.addRepeatedField(fieldDescriptor, bytes); + } catch (JSONException e) { + throw new IllegalArgumentException( + String.format( + "Error: " + + currentScope + + "[" + + index + + "] could not be converted to byte[].")); + } + } else if (val instanceof ByteString) { + protoMsg.addRepeatedField(fieldDescriptor, ((ByteString) val).toByteArray()); + return; + } else { + fail = true; + } + } + break; + case INT64: + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.DATETIME) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.parse((String) val))); + } else if (val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, (Long) val); + } else { + fail = true; + } + } else if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIME) { + if (val instanceof String) { + protoMsg.addRepeatedField( + fieldDescriptor, + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime( + LocalTime.parse((String) val))); + } else if (val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, (Long) val); + } else { + fail = true; + } + } else if (val instanceof Integer) { + protoMsg.addRepeatedField(fieldDescriptor, new Long((Integer) val)); + } else if (val instanceof Long) { + protoMsg.addRepeatedField(fieldDescriptor, (Long) val); + } else { + fail = true; + } + break; + case INT32: + if (val instanceof Integer) { + protoMsg.addRepeatedField(fieldDescriptor, (Integer) val); + } else { + fail = true; + } + break; + case STRING: + if (val instanceof String) { + protoMsg.addRepeatedField(fieldDescriptor, (String) val); + } else { + fail = true; + } + break; + case DOUBLE: + if (val instanceof Double) { + protoMsg.addRepeatedField(fieldDescriptor, (Double) val); + } else if (val instanceof Float) { + protoMsg.addRepeatedField(fieldDescriptor, new Double((float) val)); + } else { + fail = true; + } + break; + case MESSAGE: + if (val instanceof JSONObject) { + Message.Builder message = protoMsg.newBuilderForField(fieldDescriptor); + protoMsg.addRepeatedField( + fieldDescriptor, + convertJsonToProtoMessageImpl( + fieldDescriptor.getMessageType(), + fieldSchema == null ? 
null : fieldSchema.getFieldsList(), + jsonArray.getJSONObject(i), + currentScope, + /* topLevel= */ false)); + } else { + fail = true; + } + break; + } + if (fail) { + throw new IllegalArgumentException( + String.format( + "JSONObject does not have a %s field at %s[%d].", + FieldTypeToDebugMessage.get(fieldDescriptor.getType()), currentScope, index)); + } + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaConverter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaConverter.java new file mode 100644 index 000000000000..b3b084c39cfb --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaConverter.java @@ -0,0 +1,121 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.protobuf.DescriptorProtos.DescriptorProto; +import com.google.protobuf.DescriptorProtos.EnumDescriptorProto; +import com.google.protobuf.DescriptorProtos.FieldDescriptorProto; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.FieldDescriptor; +import io.grpc.Status; +import java.util.HashSet; +import java.util.Set; + +// A Converter class that turns a native protobuf::DescriptorProto to a self contained +// protobuf::DescriptorProto +// that can be reconstructed by the backend. +// +// This client lib is deprecated, please use v1 instead. 
+@Deprecated +public class ProtoSchemaConverter { + private static String getNameFromFullName(String fullName) { + return fullName.replace('.', '_'); + } + + private static ProtoSchema convertInternal( + Descriptor input, + Set visitedTypes, + Set enumTypes, + Set structTypes, + DescriptorProto.Builder rootProtoSchema) { + DescriptorProto.Builder resultProto = DescriptorProto.newBuilder(); + if (rootProtoSchema == null) { + rootProtoSchema = resultProto; + } + String protoFullName = input.getFullName(); + String protoName = getNameFromFullName(protoFullName); + resultProto.setName(protoName); + Set localEnumTypes = new HashSet(); + visitedTypes.add(input.getFullName()); + for (int i = 0; i < input.getFields().size(); i++) { + FieldDescriptor inputField = input.getFields().get(i); + FieldDescriptorProto.Builder resultField = inputField.toProto().toBuilder(); + if (inputField.getType() == FieldDescriptor.Type.GROUP + || inputField.getType() == FieldDescriptor.Type.MESSAGE) { + String msgFullName = inputField.getMessageType().getFullName(); + String msgName = getNameFromFullName(msgFullName); + if (structTypes.contains(msgFullName)) { + resultField.setTypeName(msgName); + } else { + if (visitedTypes.contains(msgFullName)) { + throw new InvalidArgumentException( + "Recursive type is not supported:" + inputField.getMessageType().getFullName(), + null, + GrpcStatusCode.of(Status.Code.INVALID_ARGUMENT), + false); + } + visitedTypes.add(msgFullName); + rootProtoSchema.addNestedType( + convertInternal( + inputField.getMessageType(), + visitedTypes, + enumTypes, + structTypes, + rootProtoSchema) + .getProtoDescriptor()); + visitedTypes.remove(msgFullName); + resultField.setTypeName( + rootProtoSchema.getNestedType(rootProtoSchema.getNestedTypeCount() - 1).getName()); + } + } + + if (inputField.getType() == FieldDescriptor.Type.ENUM) { + // For enums, in order to avoid value conflict, we will always define + // a enclosing struct called enum_full_name_E that includes the actual + // enum. + String enumFullName = inputField.getEnumType().getFullName(); + String enclosingTypeName = getNameFromFullName(enumFullName) + "_E"; + String enumName = inputField.getEnumType().getName(); + String actualEnumFullName = enclosingTypeName + "." 
+ enumName; + if (enumTypes.contains(enumFullName)) { + resultField.setTypeName(actualEnumFullName); + } else { + EnumDescriptorProto enumType = inputField.getEnumType().toProto(); + resultProto.addNestedType( + DescriptorProto.newBuilder() + .setName(enclosingTypeName) + .addEnumType(enumType.toBuilder().setName(enumName)) + .build()); + resultField.setTypeName(actualEnumFullName); + enumTypes.add(enumFullName); + } + } + resultProto.addField(resultField); + } + structTypes.add(protoFullName); + + return ProtoSchema.newBuilder().setProtoDescriptor(resultProto.build()).build(); + } + + public static ProtoSchema convert(Descriptor descriptor) { + Set visitedTypes = new HashSet(); + Set enumTypes = new HashSet(); + Set structTypes = new HashSet(); + return convertInternal(descriptor, visitedTypes, enumTypes, structTypes, null); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamConnection.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamConnection.java new file mode 100644 index 000000000000..fa602ea6e029 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamConnection.java @@ -0,0 +1,107 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; + +/** + * StreamConnection is responsible for writing requests to a GRPC bidirecional connection. + * + *

+ * <p>StreamWriter creates a connection. Two callback functions are necessary: request_callback and
+ * done_callback. The request callback is invoked for every request, and the done callback notifies
+ * the user that the connection is closed and no more callbacks will be received from this
+ * connection.
+ *

+ * <p>The stream writer accepts all requests without flow control and makes the callbacks in
+ * receiving order.
+ *

+ * <p>It is the user's responsibility to perform flow control and manage the lifetime of the
+ * requests.
+ *
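+ * <p>A minimal usage sketch (hypothetical: {@code client} and {@code request} are placeholders,
+ * and the callback bodies are illustrative only; the constructor and methods are the ones defined
+ * below):
+ *
+ * <pre>{@code
+ * StreamConnection connection =
+ *     new StreamConnection(
+ *         client,
+ *         response -> System.out.println("offset: " + response.getAppendResult().getOffset()),
+ *         finalStatus -> System.out.println("connection closed: " + finalStatus));
+ * connection.send(request);
+ * connection.close();
+ * }</pre>
+ *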

+ * <p>This client library is deprecated; please use v1 instead.
+ */
+@Deprecated
+public class StreamConnection {
+  private BidiStreamingCallable<AppendRowsRequest, AppendRowsResponse> bidiStreamingCallable;
+  private ClientStream<AppendRowsRequest> clientStream;
+
+  private RequestCallback requestCallback;
+  private DoneCallback doneCallback;
+
+  public StreamConnection(
+      BigQueryWriteClient client, RequestCallback requestCallback, DoneCallback doneCallback) {
+    this.requestCallback = requestCallback;
+    this.doneCallback = doneCallback;
+
+    bidiStreamingCallable = client.appendRowsCallable();
+    clientStream =
+        bidiStreamingCallable.splitCall(
+            new ResponseObserver<AppendRowsResponse>() {
+
+              @Override
+              public void onStart(StreamController controller) {
+                // no-op
+              }
+
+              @Override
+              public void onResponse(AppendRowsResponse response) {
+                StreamConnection.this.requestCallback.run(response);
+              }
+
+              @Override
+              public void onError(Throwable t) {
+                StreamConnection.this.doneCallback.run(t);
+              }
+
+              @Override
+              public void onComplete() {
+                StreamConnection.this.doneCallback.run(
+                    new StatusRuntimeException(
+                        Status.fromCode(Code.CANCELLED)
+                            .withDescription("Stream is closed by user.")));
+              }
+            });
+  }
+
+  /**
+   * Sends a request to the bi-directional stream connection.
+   *
+   * @param request request to send.
+   */
+  public void send(AppendRowsRequest request) {
+    clientStream.send(request);
+  }
+
+  /** Closes the bi-directional stream connection. */
+  public void close() {
+    clientStream.closeSend();
+  }
+
+  /** Invoked when a response is received from the server. */
+  public static interface RequestCallback {
+    public void run(AppendRowsResponse response);
+  }
+
+  /** Invoked when the server closes the connection. */
+  public static interface DoneCallback {
+    public void run(Throwable finalStatus);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamWriterV2.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamWriterV2.java
new file mode 100644
index 000000000000..b9b7013ad0c6
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamWriterV2.java
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1beta2;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.core.SettableApiFuture;
+import com.google.api.gax.core.CredentialsProvider;
+import com.google.api.gax.rpc.FixedHeaderProvider;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData;
+import com.google.cloud.bigquery.storage.v1beta2.StreamConnection.DoneCallback;
+import com.google.cloud.bigquery.storage.v1beta2.StreamConnection.RequestCallback;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.google.protobuf.Int64Value;
+import io.grpc.Status;
+import io.grpc.Status.Code;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.logging.Logger;
+import javax.annotation.concurrent.GuardedBy;
+
+/**
+ * A BigQuery Stream Writer that can be used to write data into a BigQuery table.
+ *

+ * <p>TODO: Support batching.
+ *

+ * <p>TODO: Support schema change.
+ *
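+ * <p>A minimal usage sketch ({@code MyProtoMessage}, {@code streamName}, and {@code rows} are
+ * placeholders; the builder and append methods are the ones defined below):
+ *
+ * <pre>{@code
+ * ProtoSchema schema = ProtoSchemaConverter.convert(MyProtoMessage.getDescriptor());
+ * try (StreamWriterV2 writer =
+ *     StreamWriterV2.newBuilder(streamName).setWriterSchema(schema).build()) {
+ *   ApiFuture<AppendRowsResponse> response = writer.append(rows, -1);
+ * }
+ * }</pre>
+ *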

+ * <p>This client library is deprecated; please use v1 instead.
+ */
+@Deprecated
+public class StreamWriterV2 implements AutoCloseable {
+  private static final Logger log = Logger.getLogger(StreamWriterV2.class.getName());
+
+  private Lock lock;
+  private Condition hasMessageInWaitingQueue;
+  private Condition inflightReduced;
+
+  /*
+   * The identifier of the stream to write to.
+   */
+  private final String streamName;
+
+  /*
+   * The proto schema of rows to write.
+   */
+  private final ProtoSchema writerSchema;
+
+  /*
+   * Max allowed inflight requests in the stream. The append method blocks at this limit.
+   */
+  private final long maxInflightRequests;
+
+  /*
+   * Max allowed inflight bytes in the stream. The append method blocks at this limit.
+   */
+  private final long maxInflightBytes;
+
+  /*
+   * TraceId for debugging purposes.
+   */
+  private final String traceId;
+
+  /*
+   * Tracks current inflight requests in the stream.
+   */
+  @GuardedBy("lock")
+  private long inflightRequests = 0;
+
+  /*
+   * Tracks current inflight bytes in the stream.
+   */
+  @GuardedBy("lock")
+  private long inflightBytes = 0;
+
+  /*
+   * Indicates whether the user has called close() or not.
+   */
+  @GuardedBy("lock")
+  private boolean userClosed = false;
+
+  /*
+   * The final status of the connection. Set to non-null when the connection is permanently closed.
+   */
+  @GuardedBy("lock")
+  private Throwable connectionFinalStatus = null;
+
+  /*
+   * Contains requests buffered in the client and not yet sent to the server.
+   */
+  @GuardedBy("lock")
+  private final Deque<AppendRequestAndResponse> waitingRequestQueue;
+
+  /*
+   * Contains sent append requests waiting for a response from the server.
+   */
+  @GuardedBy("lock")
+  private final Deque<AppendRequestAndResponse> inflightRequestQueue;
+
+  /*
+   * A client used to interact with BigQuery.
+   */
+  private BigQueryWriteClient client;
+
+  /*
+   * If true, the client above is created by this writer and should be closed.
+   */
+  private boolean ownsBigQueryWriteClient = false;
+
+  /*
+   * Wraps the underlying bi-directional stream connection with the server.
+   */
+  private StreamConnection streamConnection;
+
+  /*
+   * A separate thread to handle actual communication with the server.
+   */
+  private Thread appendThread;
+
+  /** The maximum size of one request. Defined by the API. */
+  public static long getApiMaxRequestBytes() {
+    return 10L * 1000L * 1000L; // 10 megabytes (https://en.wikipedia.org/wiki/Megabyte)
+  }
+
+  private StreamWriterV2(Builder builder) throws IOException {
+    this.lock = new ReentrantLock();
+    this.hasMessageInWaitingQueue = lock.newCondition();
+    this.inflightReduced = lock.newCondition();
+    this.streamName = builder.streamName;
+    if (builder.writerSchema == null) {
+      throw new StatusRuntimeException(
+          Status.fromCode(Code.INVALID_ARGUMENT)
+              .withDescription("Writer schema must be provided when building this writer."));
+    }
+    this.writerSchema = builder.writerSchema;
+    this.maxInflightRequests = builder.maxInflightRequest;
+    this.maxInflightBytes = builder.maxInflightBytes;
+    this.traceId = builder.traceId;
+    this.waitingRequestQueue = new LinkedList<AppendRequestAndResponse>();
+    this.inflightRequestQueue = new LinkedList<AppendRequestAndResponse>();
+    if (builder.client == null) {
+      BigQueryWriteSettings stubSettings =
+          BigQueryWriteSettings.newBuilder()
+              .setCredentialsProvider(builder.credentialsProvider)
+              .setTransportChannelProvider(builder.channelProvider)
+              .setEndpoint(builder.endpoint)
+              // (b/185842996): Temporarily fix this by explicitly providing the header.
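+              // AppendRows is a bidi stream, so the routing header cannot be derived from each
+              // request and is pinned to this stream's write_stream instead (assumed rationale).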
+ .setHeaderProvider( + FixedHeaderProvider.create( + "x-goog-request-params", "write_stream=" + this.streamName)) + .build(); + this.client = BigQueryWriteClient.create(stubSettings); + this.ownsBigQueryWriteClient = true; + } else { + this.client = builder.client; + this.ownsBigQueryWriteClient = false; + } + + this.streamConnection = + new StreamConnection( + this.client, + new RequestCallback() { + @Override + public void run(AppendRowsResponse response) { + requestCallback(response); + } + }, + new DoneCallback() { + @Override + public void run(Throwable finalStatus) { + doneCallback(finalStatus); + } + }); + this.appendThread = + new Thread( + new Runnable() { + @Override + public void run() { + appendLoop(); + } + }); + this.appendThread.start(); + } + + /** + * Schedules the writing of a message. + * + *

+   * <p>Example of writing a message.
+   *

+   * <pre>{@code
+   * ProtoRows rows;
+   * ApiFuture<AppendRowsResponse> responseFuture = writer.append(rows, 0);
+   * ApiFutures.addCallback(responseFuture, new ApiFutureCallback<AppendRowsResponse>() {
+   *   public void onSuccess(AppendRowsResponse response) {
+   *     if (!response.hasError()) {
+   *       System.out.println("written with offset: " + response.getAppendResult().getOffset());
+   *     } else {
+   *       System.out.println("received an in stream error: " + response.getError().toString());
+   *     }
+   *   }
+   *
+   *   public void onFailure(Throwable t) {
+   *     System.out.println("failed to write: " + t);
+   *   }
+   * }, MoreExecutors.directExecutor());
+   * }</pre>
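+   *
+   * <p>Passing a negative offset leaves the request's offset unset so the server assigns offsets
+   * (see the implementation below).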
+   *
+   * @param rows the rows in serialized format to write to BigQuery.
+   * @param offset the offset of the first row.
+   * @return the append response wrapped in a future.
+   */
+  public ApiFuture<AppendRowsResponse> append(ProtoRows rows, long offset) {
+    AppendRowsRequest.Builder requestBuilder = AppendRowsRequest.newBuilder();
+    requestBuilder.setProtoRows(ProtoData.newBuilder().setRows(rows).build());
+    if (offset >= 0) {
+      requestBuilder.setOffset(Int64Value.of(offset));
+    }
+    return appendInternal(requestBuilder.build());
+  }
+
+  private ApiFuture<AppendRowsResponse> appendInternal(AppendRowsRequest message) {
+    AppendRequestAndResponse requestWrapper = new AppendRequestAndResponse(message);
+    if (requestWrapper.messageSize > getApiMaxRequestBytes()) {
+      requestWrapper.appendResult.setException(
+          new StatusRuntimeException(
+              Status.fromCode(Code.INVALID_ARGUMENT)
+                  .withDescription(
+                      "MessageSize is too large. Max allowed: "
+                          + getApiMaxRequestBytes()
+                          + " Actual: "
+                          + requestWrapper.messageSize)));
+      return requestWrapper.appendResult;
+    }
+    this.lock.lock();
+    try {
+      if (userClosed) {
+        requestWrapper.appendResult.setException(
+            new StatusRuntimeException(
+                Status.fromCode(Status.Code.FAILED_PRECONDITION)
+                    .withDescription("Stream is already closed")));
+        return requestWrapper.appendResult;
+      }
+      if (connectionFinalStatus != null) {
+        requestWrapper.appendResult.setException(
+            new StatusRuntimeException(
+                Status.fromCode(Status.Code.FAILED_PRECONDITION)
+                    .withDescription(
+                        "Stream is closed due to " + connectionFinalStatus.toString())));
+        return requestWrapper.appendResult;
+      }
+
+      ++this.inflightRequests;
+      this.inflightBytes += requestWrapper.messageSize;
+      waitingRequestQueue.addLast(requestWrapper);
+      hasMessageInWaitingQueue.signal();
+      maybeWaitForInflightQuota();
+      return requestWrapper.appendResult;
+    } finally {
+      this.lock.unlock();
+    }
+  }
+
+  @GuardedBy("lock")
+  private void maybeWaitForInflightQuota() {
+    while (this.inflightRequests >= this.maxInflightRequests
+        || this.inflightBytes >= this.maxInflightBytes) {
+      try {
+        inflightReduced.await(100, TimeUnit.MILLISECONDS);
+      } catch (InterruptedException e) {
+        log.warning(
+            "Interrupted while waiting for inflight quota. Stream: "
+                + streamName
+                + " Error: "
+                + e.toString());
+        throw new StatusRuntimeException(
+            Status.fromCode(Code.CANCELLED)
+                .withCause(e)
+                .withDescription("Interrupted while waiting for quota."));
+      }
+    }
+  }
+
+  /** Closes the stream writer and shuts down all resources. */
+  @Override
+  public void close() {
+    log.info("User closing stream: " + streamName);
+    this.lock.lock();
+    try {
+      this.userClosed = true;
+    } finally {
+      this.lock.unlock();
+    }
+    log.fine("Waiting for append thread to finish. Stream: " + streamName);
+    try {
+      appendThread.join();
+      log.info("User close complete. Stream: " + streamName);
+    } catch (InterruptedException e) {
+      // Unexpected. Just swallow the exception with logging.
+      log.warning(
+          "Append handler join is interrupted. Stream: " + streamName + " Error: " + e.toString());
+    }
+    if (this.ownsBigQueryWriteClient) {
+      this.client.close();
+      try {
+        this.client.awaitTermination(1, TimeUnit.MINUTES);
+      } catch (InterruptedException ignored) {
+      }
+    }
+  }
+
+  /*
+   * This loop is executed in a separate thread.
+   *
+   * It takes requests from the waiting queue and sends them to the server.
+   */
+  private void appendLoop() {
+    boolean isFirstRequestInConnection = true;
+    Deque<AppendRequestAndResponse> localQueue = new LinkedList<AppendRequestAndResponse>();
+    while (!waitingQueueDrained()) {
+      this.lock.lock();
+      try {
+        hasMessageInWaitingQueue.await(100, TimeUnit.MILLISECONDS);
+        while (!this.waitingRequestQueue.isEmpty()) {
+          AppendRequestAndResponse requestWrapper = this.waitingRequestQueue.pollFirst();
+          this.inflightRequestQueue.addLast(requestWrapper);
+          localQueue.addLast(requestWrapper);
+        }
+      } catch (InterruptedException e) {
+        log.warning(
+            "Interrupted while waiting for message. Stream: "
+                + streamName
+                + " Error: "
+                + e.toString());
+      } finally {
+        this.lock.unlock();
+      }
+
+      if (localQueue.isEmpty()) {
+        continue;
+      }
+
+      // TODO: Add reconnection here.
+      while (!localQueue.isEmpty()) {
+        AppendRowsRequest preparedRequest =
+            prepareRequestBasedOnPosition(
+                localQueue.pollFirst().message, isFirstRequestInConnection);
+        this.streamConnection.send(preparedRequest);
+        isFirstRequestInConnection = false;
+      }
+    }
+
+    log.fine("Cleanup starts. Stream: " + streamName);
+    // At this point, the waiting queue is drained, so no more requests.
+    // We can close the stream connection and handle the remaining inflight requests.
+    this.streamConnection.close();
+    waitForDoneCallback();
+
+    // At this point, there cannot be more callbacks. It is safe to clean up all inflight requests.
+    log.fine(
+        "Stream connection is fully closed. Cleaning up inflight requests. Stream: " + streamName);
+    cleanupInflightRequests();
+    log.fine("Append thread is done. Stream: " + streamName);
+  }
+
+  /*
+   * Returns true if the waiting queue is drained, i.e. there are no more requests in the waiting
+   * queue.
+   *
+   * It serves as a signal to the append thread that there cannot be any more requests in the
+   * waiting queue and it can prepare to stop.
+   */
+  private boolean waitingQueueDrained() {
+    this.lock.lock();
+    try {
+      return (this.userClosed || this.connectionFinalStatus != null)
+          && this.waitingRequestQueue.isEmpty();
+    } finally {
+      this.lock.unlock();
+    }
+  }
+
+  private void waitForDoneCallback() {
+    log.fine("Waiting for done callback from stream connection. Stream: " + streamName);
+    while (true) {
+      this.lock.lock();
+      try {
+        if (connectionFinalStatus != null) {
+          // Done callback is received, return.
+          return;
+        }
+      } finally {
+        this.lock.unlock();
+      }
+      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  private AppendRowsRequest prepareRequestBasedOnPosition(
+      AppendRowsRequest original, boolean isFirstRequest) {
+    AppendRowsRequest.Builder requestBuilder = original.toBuilder();
+    if (isFirstRequest) {
+      if (this.writerSchema != null) {
+        requestBuilder.getProtoRowsBuilder().setWriterSchema(this.writerSchema);
+      }
+      requestBuilder.setWriteStream(this.streamName);
+      if (this.traceId != null) {
+        requestBuilder.setTraceId(this.traceId);
+      }
+    } else {
+      requestBuilder.clearWriteStream();
+      requestBuilder.getProtoRowsBuilder().clearWriterSchema();
+    }
+    return requestBuilder.build();
+  }
+
+  private void cleanupInflightRequests() {
+    Throwable finalStatus;
+    Deque<AppendRequestAndResponse> localQueue = new LinkedList<AppendRequestAndResponse>();
+    this.lock.lock();
+    try {
+      finalStatus = this.connectionFinalStatus;
+      while (!this.inflightRequestQueue.isEmpty()) {
+        localQueue.addLast(pollInflightRequestQueue());
+      }
+    } finally {
+      this.lock.unlock();
+    }
+    log.fine(
+        "Cleaning "
+            + localQueue.size()
+            + " inflight requests with error: "
+            + finalStatus.toString());
+    while (!localQueue.isEmpty()) {
+      localQueue.pollFirst().appendResult.setException(finalStatus);
+    }
+  }
+
+  private void requestCallback(AppendRowsResponse response) {
+    AppendRequestAndResponse requestWrapper;
+    this.lock.lock();
+    try {
+      requestWrapper = pollInflightRequestQueue();
+    } finally {
+      this.lock.unlock();
+    }
+    if (response.hasError()) {
+      StatusRuntimeException exception =
+          new StatusRuntimeException(
+              Status.fromCodeValue(response.getError().getCode())
+                  .withDescription(response.getError().getMessage()));
+      requestWrapper.appendResult.setException(exception);
+    } else {
+      requestWrapper.appendResult.set(response);
+    }
+  }
+
+  private void doneCallback(Throwable finalStatus) {
+    log.fine(
+        "Received done callback. Stream: "
+            + streamName
+            + " Final status: "
+            + finalStatus.toString());
+    this.lock.lock();
+    try {
+      this.connectionFinalStatus = finalStatus;
+    } finally {
+      this.lock.unlock();
+    }
+  }
+
+  @GuardedBy("lock")
+  private AppendRequestAndResponse pollInflightRequestQueue() {
+    AppendRequestAndResponse requestWrapper = this.inflightRequestQueue.pollFirst();
+    --this.inflightRequests;
+    this.inflightBytes -= requestWrapper.messageSize;
+    this.inflightReduced.signal();
+    return requestWrapper;
+  }
+
+  /**
+   * Constructs a new {@link StreamWriterV2.Builder} using the given stream and client. AppendRows
+   * needs special headers to be added to the client, so a passed-in client will not work. This
+   * should be used in tests only.
+   */
+  public static StreamWriterV2.Builder newBuilder(String streamName, BigQueryWriteClient client) {
+    return new StreamWriterV2.Builder(streamName, client);
+  }
+
+  /** Constructs a new {@link StreamWriterV2.Builder} using the given stream. */
+  public static StreamWriterV2.Builder newBuilder(String streamName) {
+    return new StreamWriterV2.Builder(streamName);
+  }
+
+  /** A builder of {@link StreamWriterV2}s. */
+  public static final class Builder {
+
+    private static final long DEFAULT_MAX_INFLIGHT_REQUESTS = 1000L;
+
+    private static final long DEFAULT_MAX_INFLIGHT_BYTES = 100 * 1024 * 1024; // 100 MiB.
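+
+    // Both limits can be overridden per writer (illustrative):
+    //   StreamWriterV2.newBuilder(streamName)
+    //       .setMaxInflightRequests(2000)
+    //       .setMaxInflightBytes(200L * 1024 * 1024)
+    //       .build();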
+ + private String streamName; + + private BigQueryWriteClient client; + + private ProtoSchema writerSchema = null; + + private long maxInflightRequest = DEFAULT_MAX_INFLIGHT_REQUESTS; + + private long maxInflightBytes = DEFAULT_MAX_INFLIGHT_BYTES; + + private String endpoint = BigQueryWriteSettings.getDefaultEndpoint(); + + private TransportChannelProvider channelProvider = + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder().setChannelsPerCpu(1).build(); + + private CredentialsProvider credentialsProvider = + BigQueryWriteSettings.defaultCredentialsProviderBuilder().build(); + + private String traceId = null; + + private Builder(String streamName) { + this.streamName = Preconditions.checkNotNull(streamName); + this.client = null; + } + + private Builder(String streamName, BigQueryWriteClient client) { + this.streamName = Preconditions.checkNotNull(streamName); + this.client = Preconditions.checkNotNull(client); + } + + /** Sets the proto schema of the rows. */ + public Builder setWriterSchema(ProtoSchema writerSchema) { + this.writerSchema = writerSchema; + return this; + } + + public Builder setMaxInflightRequests(long value) { + this.maxInflightRequest = value; + return this; + } + + public Builder setMaxInflightBytes(long value) { + this.maxInflightBytes = value; + return this; + } + + /** Gives the ability to override the gRPC endpoint. */ + public Builder setEndpoint(String endpoint) { + this.endpoint = Preconditions.checkNotNull(endpoint, "Endpoint is null."); + return this; + } + + /** + * {@code ChannelProvider} to use to create Channels, which must point at Cloud BigQuery Storage + * API endpoint. + * + *

+     * <p>For performance, this client benefits from having multiple underlying connections. See
+     * {@link com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.Builder#setPoolSize(int)}.
+     */
+    public Builder setChannelProvider(TransportChannelProvider channelProvider) {
+      this.channelProvider =
+          Preconditions.checkNotNull(channelProvider, "ChannelProvider is null.");
+      return this;
+    }
+
+    /** {@code CredentialsProvider} to use to create Credentials to authenticate calls. */
+    public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) {
+      this.credentialsProvider =
+          Preconditions.checkNotNull(credentialsProvider, "CredentialsProvider is null.");
+      return this;
+    }
+
+    /**
+     * Sets traceId for debugging purposes. TraceId must follow the format of
+     * CustomerDomain:DebugString, e.g. DATAFLOW:job_id_x.
+     */
+    public Builder setTraceId(String traceId) {
+      int colonIndex = traceId.indexOf(':');
+      if (colonIndex == -1 || colonIndex == 0 || colonIndex == traceId.length() - 1) {
+        throw new IllegalArgumentException(
+            "TraceId must follow the format of A:B. Actual: " + traceId);
+      }
+      this.traceId = traceId;
+      return this;
+    }
+
+    /** Builds the {@code StreamWriterV2}. */
+    public StreamWriterV2 build() throws IOException {
+      return new StreamWriterV2(this);
+    }
+  }
+
+  // Class that wraps AppendRowsRequest and its corresponding response future.
+  private static final class AppendRequestAndResponse {
+    final SettableApiFuture<AppendRowsResponse> appendResult;
+    final AppendRowsRequest message;
+    final long messageSize;
+
+    AppendRequestAndResponse(AppendRowsRequest message) {
+      this.appendResult = SettableApiFuture.create();
+      this.message = message;
+      this.messageSize = message.getProtoRows().getSerializedSize();
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/Waiter.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/Waiter.java
new file mode 100644
index 000000000000..997cfc4613cd
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/Waiter.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1beta2;
+
+import com.google.api.core.InternalApi;
+import com.google.api.gax.batching.FlowControlSettings;
+import com.google.api.gax.batching.FlowController;
+import java.util.LinkedList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.logging.Logger;
+
+/**
+ * A barrier kind of object that helps keep track of pending actions and synchronously waits until
+ * all have completed.
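+ *
+ * <p>A sketch of the intended call pattern (method names as defined below; {@code
+ * flowControlSettings} is a placeholder and error handling is omitted):
+ *
+ * <pre>{@code
+ * Waiter waiter = new Waiter(flowControlSettings);
+ * waiter.acquire(messageSize); // before a request is sent
+ * // ... when the corresponding response arrives:
+ * waiter.release(messageSize);
+ * waiter.waitComplete(0); // block until all pending actions complete
+ * }</pre>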
+ */
+@Deprecated
+class Waiter {
+  private static final Logger LOG =
+      Logger.getLogger(com.google.cloud.bigquery.storage.v1beta2.Waiter.class.getName());
+
+  private long pendingCount;
+  private long pendingSize;
+  private long countLimit;
+  private long sizeLimit;
+  private FlowController.LimitExceededBehavior behavior;
+  private LinkedList<CountDownLatch> awaitingMessageAcquires;
+  private LinkedList<CountDownLatch> awaitingBytesAcquires;
+  private final Lock lock;
+
+  Waiter(FlowControlSettings flowControlSettings) {
+    pendingCount = 0;
+    pendingSize = 0;
+    this.awaitingMessageAcquires = new LinkedList<CountDownLatch>();
+    this.awaitingBytesAcquires = new LinkedList<CountDownLatch>();
+    this.countLimit = flowControlSettings.getMaxOutstandingElementCount();
+    this.sizeLimit = flowControlSettings.getMaxOutstandingRequestBytes();
+    this.behavior = flowControlSettings.getLimitExceededBehavior();
+    this.lock = new ReentrantLock();
+  }
+
+  private void notifyNextAcquires() {
+    if (!awaitingMessageAcquires.isEmpty()) {
+      CountDownLatch awaitingAcquire = awaitingMessageAcquires.getFirst();
+      awaitingAcquire.countDown();
+    }
+    if (!awaitingBytesAcquires.isEmpty()) {
+      CountDownLatch awaitingAcquire = awaitingBytesAcquires.getFirst();
+      awaitingAcquire.countDown();
+    }
+  }
+
+  public synchronized void release(long messageSize) throws IllegalStateException {
+    lock.lock();
+    LOG.fine("release: " + pendingCount + " to " + (pendingCount - 1));
+    --pendingCount;
+    if (pendingCount < 0) {
+      throw new IllegalStateException("pendingCount cannot be less than 0");
+    }
+    pendingSize -= messageSize;
+    if (pendingSize < 0) {
+      throw new IllegalStateException("pendingSize cannot be less than 0");
+    }
+    notifyNextAcquires();
+    lock.unlock();
+    notifyAll();
+  }
+
+  public void acquire(long messageSize) throws FlowController.FlowControlException {
+    lock.lock();
+    try {
+      LOG.fine("acquire " + pendingCount + " to " + (pendingCount + 1));
+      if (pendingCount >= countLimit
+          && behavior == FlowController.LimitExceededBehavior.ThrowException) {
+        throw new FlowController.MaxOutstandingElementCountReachedException(countLimit);
+      }
+      if (pendingSize + messageSize >= sizeLimit
+          && behavior == FlowController.LimitExceededBehavior.ThrowException) {
+        throw new FlowController.MaxOutstandingRequestBytesReachedException(sizeLimit);
+      }
+
+      CountDownLatch messageWaiter = null;
+      while (pendingCount >= countLimit) {
+        if (messageWaiter == null) {
+          // This message gets added to the back of the line.
+          messageWaiter = new CountDownLatch(1);
+          awaitingMessageAcquires.addLast(messageWaiter);
+        } else {
+          // A message already in line stays at the head of the line.
+          messageWaiter = new CountDownLatch(1);
+          awaitingMessageAcquires.set(0, messageWaiter);
+        }
+        lock.unlock();
+        try {
+          messageWaiter.await();
+        } catch (InterruptedException e) {
+          LOG.warning("Interrupted while waiting to acquire flow control tokens");
+        }
+        lock.lock();
+      }
+      ++pendingCount;
+      if (messageWaiter != null) {
+        awaitingMessageAcquires.removeFirst();
+      }
+
+      if (!awaitingMessageAcquires.isEmpty() && pendingCount < countLimit) {
+        awaitingMessageAcquires.getFirst().countDown();
+      }
+
+      // Now acquire space for bytes.
+      CountDownLatch bytesWaiter = null;
+      while (pendingSize + messageSize >= sizeLimit) {
+        if (bytesWaiter == null) {
+          // This message gets added to the back of the line.
+          bytesWaiter = new CountDownLatch(1);
+          awaitingBytesAcquires.addLast(bytesWaiter);
+        } else {
+          // A message already in line stays at the head of the line.
+ bytesWaiter = new CountDownLatch(1); + awaitingBytesAcquires.set(0, bytesWaiter); + } + lock.unlock(); + try { + bytesWaiter.await(); + } catch (InterruptedException e) { + LOG.warning("Interrupted while waiting to acquire flow control tokens"); + } + lock.lock(); + } + + pendingSize += messageSize; + if (bytesWaiter != null) { + awaitingBytesAcquires.removeFirst(); + } + // There may be some surplus bytes left; let the next message waiting for bytes have some. + if (!awaitingBytesAcquires.isEmpty() && pendingSize < sizeLimit) { + awaitingBytesAcquires.getFirst().countDown(); + } + } finally { + lock.unlock(); + } + } + + public synchronized void waitComplete(long timeoutMillis) throws InterruptedException { + long end = System.currentTimeMillis() + timeoutMillis; + lock.lock(); + try { + while (pendingCount > 0 && (timeoutMillis == 0 || end > System.currentTimeMillis())) { + lock.unlock(); + try { + wait(timeoutMillis == 0 ? 0 : end - System.currentTimeMillis()); + } catch (InterruptedException e) { + throw e; + } + lock.lock(); + } + if (pendingCount > 0) { + throw new InterruptedException("Wait timeout"); + } + } finally { + lock.unlock(); + } + } + + @InternalApi + public long pendingCount() { + return pendingCount; + } + + @InternalApi + public long pendingSize() { + return pendingSize; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/gapic_metadata.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/gapic_metadata.json new file mode 100644 index 000000000000..65272ad1889c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/gapic_metadata.json @@ -0,0 +1,54 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.cloud.bigquery.storage.v1beta2", + "libraryPackage": "com.google.cloud.bigquery.storage.v1beta2", + "services": { + "BigQueryRead": { + "clients": { + "grpc": { + "libraryClient": "BaseBigQueryReadClient", + "rpcs": { + "CreateReadSession": { + "methods": ["createReadSession", "createReadSession", "createReadSession", "createReadSessionCallable"] + }, + "ReadRows": { + "methods": ["readRowsCallable"] + }, + "SplitReadStream": { + "methods": ["splitReadStream", "splitReadStreamCallable"] + } + } + } + } + }, + "BigQueryWrite": { + "clients": { + "grpc": { + "libraryClient": "BigQueryWriteClient", + "rpcs": { + "AppendRows": { + "methods": ["appendRowsCallable"] + }, + "BatchCommitWriteStreams": { + "methods": ["batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreamsCallable"] + }, + "CreateWriteStream": { + "methods": ["createWriteStream", "createWriteStream", "createWriteStream", "createWriteStreamCallable"] + }, + "FinalizeWriteStream": { + "methods": ["finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStreamCallable"] + }, + "FlushRows": { + "methods": ["flushRows", "flushRows", "flushRows", "flushRowsCallable"] + }, + "GetWriteStream": { + "methods": ["getWriteStream", "getWriteStream", "getWriteStream", "getWriteStreamCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java new file mode 100644 index 000000000000..afdbb4db6b1f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java @@ -0,0 +1,76 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigQuery Storage API + * + *

+ * <p>The interfaces provided are listed below, along with usage samples.
+ *

+ * <p>======================= BaseBigQueryReadClient =======================
+ *

+ * <p>Service Description: BigQuery Read API.
+ *

+ * <p>The Read API can be used to read data from BigQuery.
+ *

+ * <p>New code should use the v1 Read API going forward, provided it does not also use the Write
+ * API at the same time.
+ *

+ * <p>Sample for BaseBigQueryReadClient:
+ *

+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+ *   ProjectName parent = ProjectName.of("[PROJECT]");
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 940837515;
+ *   ReadSession response =
+ *       baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * }</pre>
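+ *
+ * <p>Rows for a returned stream can then be read with the server-streaming callable (a sketch
+ * continuing the sample above; {@code readStreamName} is a placeholder):
+ *
+ * <pre>{@code
+ * ReadRowsRequest request =
+ *     ReadRowsRequest.newBuilder().setReadStream(readStreamName).build();
+ * for (ReadRowsResponse response : baseBigQueryReadClient.readRowsCallable().call(request)) {
+ *   // Each response carries a block of rows in Avro or Arrow format.
+ * }
+ * }</pre>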
+ * + *

+ * <p>======================= BigQueryWriteClient =======================
+ *

+ * <p>Service Description: BigQuery Write API.
+ *

+ * <p>The Write API can be used to write data to BigQuery.
+ *

+ * <p>The [google.cloud.bigquery.storage.v1
+ * API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1) should be used
+ * instead of the v1beta2 API for BigQueryWrite operations.
+ *

+ * <p>Sample for BigQueryWriteClient:
+ *

+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
+ *   WriteStream writeStream = WriteStream.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
+ * }
+ * }</pre>
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.bigquery.storage.v1beta2; + +import javax.annotation.Generated; diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java new file mode 100644 index 000000000000..bed68ab12ee5 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the BigQueryRead service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public abstract class BigQueryReadStub implements BackgroundResource { + + public UnaryCallable createReadSessionCallable() { + throw new UnsupportedOperationException("Not implemented: createReadSessionCallable()"); + } + + public ServerStreamingCallable readRowsCallable() { + throw new UnsupportedOperationException("Not implemented: readRowsCallable()"); + } + + public UnaryCallable splitReadStreamCallable() { + throw new UnsupportedOperationException("Not implemented: splitReadStreamCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java new file mode 100644 index 000000000000..f784b748b2d1 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java @@ -0,0 +1,389 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+/** + * Settings class to configure an instance of {@link BigQueryReadStub}. + * + *

+ * <p>The default instance has everything set to sensible defaults:
+ *

+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *

+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *

+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createReadSession:
+ *

+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryReadStubSettings.Builder baseBigQueryReadSettingsBuilder =
+ *     BigQueryReadStubSettings.newBuilder();
+ * baseBigQueryReadSettingsBuilder
+ *     .createReadSessionSettings()
+ *     .setRetrySettings(
+ *         baseBigQueryReadSettingsBuilder
+ *             .createReadSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryReadStubSettings baseBigQueryReadSettings = baseBigQueryReadSettingsBuilder.build();
+ * }</pre>
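+ *
+ * <p>To change retry behavior for every unary method at once, the builder also exposes {@code
+ * applyToAllUnaryMethods} (a sketch; the updater body is illustrative):
+ *
+ * <pre>{@code
+ * baseBigQueryReadSettingsBuilder.applyToAllUnaryMethods(
+ *     settings -> {
+ *       settings.setRetrySettings(
+ *           settings.getRetrySettings().toBuilder().setMaxAttempts(5).build());
+ *       return null;
+ *     });
+ * }</pre>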
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class BigQueryReadStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createReadSessionSettings; + private final ServerStreamingCallSettings readRowsSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + public BigQueryReadStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryReadStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryReadStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. 
*/ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryReadStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for BigQueryReadStubSettings. */ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder + readRowsSettings; + private final UnaryCallSettings.Builder + splitReadStreamSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_1_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_2_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build(); + definitions.put("retry_policy_1_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_2_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createReadSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); 
+ readRowsSettings = ServerStreamingCallSettings.newBuilder(); + splitReadStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + initDefaults(this); + } + + protected Builder(BigQueryReadStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createReadSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .readRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .splitReadStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public BigQueryReadStubSettings build() throws IOException { + return new BigQueryReadStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java new file mode 100644 index 000000000000..39618310785f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.WriteStream; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the BigQueryWrite service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + * + * @deprecated This class is deprecated and will be removed in the next major version update. + */ +@BetaApi +@Deprecated +@Generated("by gapic-generator-java") +public abstract class BigQueryWriteStub implements BackgroundResource { + + @Deprecated + public UnaryCallable createWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: createWriteStreamCallable()"); + } + + @Deprecated + public BidiStreamingCallable appendRowsCallable() { + throw new UnsupportedOperationException("Not implemented: appendRowsCallable()"); + } + + @Deprecated + public UnaryCallable getWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: getWriteStreamCallable()"); + } + + @Deprecated + public UnaryCallable + finalizeWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: finalizeWriteStreamCallable()"); + } + + @Deprecated + public UnaryCallable + batchCommitWriteStreamsCallable() { + throw new UnsupportedOperationException("Not implemented: batchCommitWriteStreamsCallable()"); + } + + @Deprecated + public UnaryCallable flushRowsCallable() { + throw new UnsupportedOperationException("Not implemented: flushRowsCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java new file mode 100644 index 000000000000..8bd7b46647f1 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java @@ -0,0 +1,532 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.WriteStream; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link BigQueryWriteStub}. + * + *

<p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
+ *       used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
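+ *
+ * <p>As an illustrative sketch (not part of the generated sample), any of these defaults can be
+ * overridden on the builder before the settings are built; for example, pinning the endpoint
+ * explicitly uses {@code setEndpoint}, which this builder inherits from {@code
+ * StubSettings.Builder}:
+ *
+ * <pre>{@code
+ * BigQueryWriteStubSettings.Builder builder = BigQueryWriteStubSettings.newBuilder();
+ * builder.setEndpoint("bigquerystorage.googleapis.com:443"); // same as the default above
+ * BigQueryWriteStubSettings settings = builder.build();
+ * }</pre>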

<p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *

<p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of createWriteStream:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * BigQueryWriteStubSettings.Builder bigQueryWriteSettingsBuilder =
+ *     BigQueryWriteStubSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * BigQueryWriteStubSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * }</pre>
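+ *
+ * <p>As a follow-on sketch (not part of the generated sample), the built settings object is
+ * typically passed to {@code createStub()}, defined on this class, to obtain the gRPC-backed
+ * stub:
+ *
+ * <pre>{@code
+ * BigQueryWriteStub bigQueryWriteStub = bigQueryWriteSettings.createStub();
+ * }</pre>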
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + * @deprecated This class is deprecated and will be removed in the next major version update. + */ +@BetaApi +@Deprecated +@Generated("by gapic-generator-java") +public class BigQueryWriteStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/bigquery.insertdata") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createWriteStreamSettings; + private final StreamingCallSettings appendRowsSettings; + private final UnaryCallSettings getWriteStreamSettings; + private final UnaryCallSettings + finalizeWriteStreamSettings; + private final UnaryCallSettings + batchCommitWriteStreamsSettings; + private final UnaryCallSettings flushRowsSettings; + + /** + * Returns the object with the settings used for calls to createWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** + * Returns the object with the settings used for calls to appendRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public StreamingCallSettings appendRowsSettings() { + return appendRowsSettings; + } + + /** + * Returns the object with the settings used for calls to getWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** + * Returns the object with the settings used for calls to finalizeWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** + * Returns the object with the settings used for calls to batchCommitWriteStreams. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + /** + * Returns the object with the settings used for calls to flushRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings flushRowsSettings() { + return flushRowsSettings; + } + + public BigQueryWriteStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryWriteStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns a builder for the default ExecutorProvider for this service. 
*/ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "bigquerystorage.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryWriteStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createWriteStreamSettings = settingsBuilder.createWriteStreamSettings().build(); + appendRowsSettings = settingsBuilder.appendRowsSettings().build(); + getWriteStreamSettings = settingsBuilder.getWriteStreamSettings().build(); + finalizeWriteStreamSettings = settingsBuilder.finalizeWriteStreamSettings().build(); + batchCommitWriteStreamsSettings = settingsBuilder.batchCommitWriteStreamsSettings().build(); + flushRowsSettings = settingsBuilder.flushRowsSettings().build(); + } + + /** Builder for BigQueryWriteStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder + createWriteStreamSettings; + private final StreamingCallSettings.Builder + appendRowsSettings; + private final UnaryCallSettings.Builder + getWriteStreamSettings; + private final UnaryCallSettings.Builder + finalizeWriteStreamSettings; + private final UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings; + private final UnaryCallSettings.Builder flushRowsSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_3_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.RESOURCE_EXHAUSTED))); + definitions.put( + "retry_policy_4_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); + definitions.put( + "retry_policy_5_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_3_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(86400000L)) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build(); + definitions.put("retry_policy_4_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(60000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(600000L)) + .build(); + definitions.put("retry_policy_5_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + appendRowsSettings = StreamingCallSettings.newBuilder(); + getWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + finalizeWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchCommitWriteStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + flushRowsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + 
ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings, + flushRowsSettings); + initDefaults(this); + } + + protected Builder(BigQueryWriteStubSettings settings) { + super(settings); + + createWriteStreamSettings = settings.createWriteStreamSettings.toBuilder(); + appendRowsSettings = settings.appendRowsSettings.toBuilder(); + getWriteStreamSettings = settings.getWriteStreamSettings.toBuilder(); + finalizeWriteStreamSettings = settings.finalizeWriteStreamSettings.toBuilder(); + batchCommitWriteStreamsSettings = settings.batchCommitWriteStreamsSettings.toBuilder(); + flushRowsSettings = settings.flushRowsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings, + flushRowsSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .getWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_5_params")); + + builder + .finalizeWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_5_params")); + + builder + .batchCommitWriteStreamsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_5_params")); + + builder + .flushRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_5_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_5_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** + * Returns the builder for the settings used for calls to createWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** + * Returns the builder for the settings used for calls to appendRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public StreamingCallSettings.Builder + appendRowsSettings() { + return appendRowsSettings; + } + + /** + * Returns the builder for the settings used for calls to getWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** + * Returns the builder for the settings used for calls to finalizeWriteStream. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** + * Returns the builder for the settings used for calls to batchCommitWriteStreams. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + /** + * Returns the builder for the settings used for calls to flushRows. + * + * @deprecated This method is deprecated and will be removed in the next major version update. + */ + @Deprecated + public UnaryCallSettings.Builder flushRowsSettings() { + return flushRowsSettings; + } + + @Override + public BigQueryWriteStubSettings build() throws IOException { + return new BigQueryWriteStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStub.java new file mode 100644 index 000000000000..bdab2777d1ae --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStub.java @@ -0,0 +1,199 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcRawCallableFactory; +import com.google.api.gax.retrying.ExponentialRetryAlgorithm; +import com.google.api.gax.retrying.ScheduledRetryingExecutor; +import com.google.api.gax.retrying.StreamingRetryAlgorithm; +import com.google.api.gax.rpc.Callables; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsExtractor; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.gax.tracing.SpanName; +import com.google.api.gax.tracing.TracedServerStreamingCallable; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadGrpc; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta2.stub.readrows.ApiResultRetryAlgorithm; +import com.google.cloud.bigquery.storage.v1beta2.stub.readrows.ReadRowsRetryingCallable; +import com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Enhanced stub class for BigQuery Storage API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +public class EnhancedBigQueryReadStub implements BackgroundResource { + + private static final String TRACING_OUTER_CLIENT_NAME = "BigQueryStorage"; + private final GrpcBigQueryReadStub stub; + private final BigQueryReadStubSettings stubSettings; + private final BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener; + private final ClientContext context; + + public static EnhancedBigQueryReadStub create(EnhancedBigQueryReadStubSettings settings) + throws IOException { + return create(settings, null); + } + + public static EnhancedBigQueryReadStub create( + EnhancedBigQueryReadStubSettings settings, + BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener) + throws IOException { + // Configure the base settings. + BigQueryReadStubSettings.Builder baseSettingsBuilder = + BigQueryReadStubSettings.newBuilder() + .setUniverseDomain(settings.getUniverseDomain()) + .setTransportChannelProvider(settings.getTransportChannelProvider()) + .setEndpoint(settings.getEndpoint()) + .setHeaderProvider(settings.getHeaderProvider()) + .setCredentialsProvider(settings.getCredentialsProvider()) + .setStreamWatchdogCheckInterval(settings.getStreamWatchdogCheckInterval()) + .setStreamWatchdogProvider(settings.getStreamWatchdogProvider()); + + baseSettingsBuilder + .createReadSessionSettings() + .setRetryableCodes(settings.createReadSessionSettings().getRetryableCodes()) + .setRetrySettings(settings.createReadSessionSettings().getRetrySettings()); + + baseSettingsBuilder + .readRowsSettings() + .setRetryableCodes(settings.readRowsSettings().getRetryableCodes()) + .setRetrySettings(settings.readRowsSettings().getRetrySettings()) + .setResumptionStrategy(settings.readRowsSettings().getResumptionStrategy()) + .setIdleTimeout(settings.readRowsSettings().getIdleTimeout()); + + baseSettingsBuilder + .splitReadStreamSettings() + .setRetryableCodes(settings.splitReadStreamSettings().getRetryableCodes()) + .setRetrySettings(settings.splitReadStreamSettings().getRetrySettings()); + + BigQueryReadStubSettings baseSettings = baseSettingsBuilder.build(); + ClientContext clientContext = ClientContext.create(baseSettings); + GrpcBigQueryReadStub stub = new GrpcBigQueryReadStub(baseSettings, clientContext); + return new EnhancedBigQueryReadStub( + stub, baseSettings, readRowsRetryAttemptListener, clientContext); + } + + @InternalApi("Visible for testing") + EnhancedBigQueryReadStub( + GrpcBigQueryReadStub stub, + BigQueryReadStubSettings stubSettings, + BigQueryReadSettings.RetryAttemptListener readRowsRetryAttemptListener, + ClientContext context) { + this.stub = stub; + this.stubSettings = stubSettings; + this.readRowsRetryAttemptListener = readRowsRetryAttemptListener; + this.context = context; + } + + public UnaryCallable createReadSessionCallable() { + return stub.createReadSessionCallable(); + } + + public ServerStreamingCallable readRowsCallable() { + ServerStreamingCallable innerCallable = + GrpcRawCallableFactory.createServerStreamingCallable( + GrpcCallSettings.newBuilder() + .setMethodDescriptor(BigQueryReadGrpc.getReadRowsMethod()) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(ReadRowsRequest request) { + return ImmutableMap.of( + "read_stream", String.valueOf(request.getReadStream())); + } + }) + .build(), + stubSettings.readRowsSettings().getRetryableCodes()); + ServerStreamingCallSettings callSettings = + stubSettings.readRowsSettings(); + + 
StreamingRetryAlgorithm retryAlgorithm = + new StreamingRetryAlgorithm<>( + new ApiResultRetryAlgorithm(readRowsRetryAttemptListener), + new ExponentialRetryAlgorithm(callSettings.getRetrySettings(), context.getClock())); + + ScheduledRetryingExecutor retryingExecutor = + new ScheduledRetryingExecutor<>(retryAlgorithm, context.getExecutor()); + + if (context.getStreamWatchdog() != null) { + innerCallable = Callables.watched(innerCallable, callSettings, context); + } + + ReadRowsRetryingCallable outerCallable = + new ReadRowsRetryingCallable( + context.getDefaultCallContext(), + innerCallable, + retryingExecutor, + callSettings.getResumptionStrategy()); + + ServerStreamingCallable traced = + new TracedServerStreamingCallable<>( + outerCallable, + context.getTracerFactory(), + SpanName.of(TRACING_OUTER_CLIENT_NAME, "ReadRows")); + return traced.withDefaultCallContext(context.getDefaultCallContext()); + } + + public UnaryCallable splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettings.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettings.java new file mode 100644 index 000000000000..3ec9e498ddaf --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettings.java @@ -0,0 +1,239 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.BaseBigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta2.stub.readrows.ReadRowsResumptionStrategy; +import com.google.common.collect.ImmutableList; +import java.util.List; + +/** + * Settings class to configure an instance of {@link EnhancedBigQueryReadStub}. + * + *

<p>The default instance dynamically reads and applies the default values used by {@link
+ * BigQueryReadStub}.
+ *

<p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of createReadSession to 30 seconds:
+ *

+ * <pre>
+ * <code>
+ * EnhancedBigQueryReadStubSettings.Builder builder =
+ *     EnhancedBigQueryReadStubSettings.newBuilder();
+ * builder.createReadSessionSettings().setRetrySettings(
+ *     builder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * EnhancedBigQueryReadStubSettings settings = builder.build();
+ * </code>
+ * </pre>
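+ *
+ * <p>As a follow-on sketch (not part of the original sample), the server-streaming readRows
+ * settings exposed by this class can be tuned the same way; the varargs retryable-codes setter
+ * shown here is assumed from the gax {@code ServerStreamingCallSettings.Builder} surface:
+ *
+ * <pre>
+ * <code>
+ * builder.readRowsSettings().setRetryableCodes(StatusCode.Code.UNAVAILABLE);
+ * </code>
+ * </pre>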
+ */ +public class EnhancedBigQueryReadStubSettings + extends StubSettings { + + private final UnaryCallSettings createReadSessionSettings; + private final ServerStreamingCallSettings readRowsSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "bigquerystorage"; + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return BigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryReadStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BaseBigQueryReadSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryReadStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return new Builder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected EnhancedBigQueryReadStubSettings(Builder settingsBuilder) { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for {@link EnhancedBigQueryReadStubSettings}. 
*/ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder + readRowsSettings; + private final UnaryCallSettings.Builder + splitReadStreamSettings; + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + // Defaults provider + BigQueryReadStubSettings.Builder baseDefaults = BigQueryReadStubSettings.newBuilder(); + setTransportChannelProvider(defaultTransportChannelProvider()); + setCredentialsProvider(baseDefaults.getCredentialsProvider()); + setStreamWatchdogCheckInterval(baseDefaults.getStreamWatchdogCheckInterval()); + setStreamWatchdogProvider(baseDefaults.getStreamWatchdogProvider()); + + // Per-method settings using baseSettings for defaults. + createReadSessionSettings = baseDefaults.createReadSessionSettings(); + splitReadStreamSettings = baseDefaults.splitReadStreamSettings(); + + // Per-method settings using override values for defaults. + readRowsSettings = + baseDefaults.readRowsSettings().setResumptionStrategy(new ReadRowsResumptionStrategy()); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + protected Builder(EnhancedBigQueryReadStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public EnhancedBigQueryReadStubSettings build() { + return new EnhancedBigQueryReadStubSettings(this); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java new file mode 100644 index 000000000000..ac801d74c455 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BigQueryRead service API. + * + *
<p>
This class is for advanced usage. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcBigQueryReadCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java new file mode 100644 index 000000000000..5aa8a8b04c44 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java @@ -0,0 +1,235 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import com.google.longrunning.stub.GrpcOperationsStub; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BigQueryRead service API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@BetaApi +@Generated("by gapic-generator-java") +public class GrpcBigQueryReadStub extends BigQueryReadStub { + private static final MethodDescriptor + createReadSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateReadSessionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadSession.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + readRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows") + .setRequestMarshaller(ProtoUtils.marshaller(ReadRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + splitReadStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream") + .setRequestMarshaller( + ProtoUtils.marshaller(SplitReadStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(SplitReadStreamResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createReadSessionCallable; + private final ServerStreamingCallable readRowsCallable; + private final UnaryCallable + splitReadStreamCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryReadStub create(BigQueryReadStubSettings settings) + throws IOException { + return new GrpcBigQueryReadStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryReadStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryReadStub(BigQueryReadStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryReadStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryReadStub( + BigQueryReadStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryReadStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryReadStub(BigQueryReadStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryReadCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryReadStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcBigQueryReadStub( + BigQueryReadStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createReadSessionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createReadSessionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "read_session.table", String.valueOf(request.getReadSession().getTable())); + return builder.build(); + }) + .build(); + GrpcCallSettings readRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("read_stream", String.valueOf(request.getReadStream())); + return builder.build(); + }) + .build(); + GrpcCallSettings + splitReadStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(splitReadStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + + this.createReadSessionCallable = + callableFactory.createUnaryCallable( + createReadSessionTransportSettings, + settings.createReadSessionSettings(), + clientContext); + this.readRowsCallable = + callableFactory.createServerStreamingCallable( + readRowsTransportSettings, settings.readRowsSettings(), clientContext); + this.splitReadStreamCallable = + callableFactory.createUnaryCallable( + splitReadStreamTransportSettings, settings.splitReadStreamSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createReadSessionCallable() { + return createReadSessionCallable; + } + + @Override + public ServerStreamingCallable readRowsCallable() { + return readRowsCallable; + } + + @Override + public UnaryCallable splitReadStreamCallable() { + return splitReadStreamCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java new file mode 100644 index 000000000000..1d219f871cc9 --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java @@ -0,0 +1,118 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the BigQueryWrite service API. + * + *
<p>
This class is for advanced usage. + * + * @deprecated This class is deprecated and will be removed in the next major version update. + */ +@BetaApi +@Deprecated +@Generated("by gapic-generator-java") +public class GrpcBigQueryWriteCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java new file mode 100644 index 000000000000..8c2de6b90d45 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java @@ -0,0 +1,345 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.WriteStream; +import com.google.longrunning.stub.GrpcOperationsStub; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the BigQueryWrite service API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + * + * @deprecated This class is deprecated and will be removed in the next major version update. + */ +@BetaApi +@Deprecated +@Generated("by gapic-generator-java") +public class GrpcBigQueryWriteStub extends BigQueryWriteStub { + private static final MethodDescriptor + createWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryWrite/CreateWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + appendRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1beta2.BigQueryWrite/AppendRows") + .setRequestMarshaller(ProtoUtils.marshaller(AppendRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AppendRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryWrite/GetWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(GetWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + finalizeWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryWrite/FinalizeWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryWrite/BatchCommitWriteStreams") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + flushRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.bigquery.storage.v1beta2.BigQueryWrite/FlushRows") + .setRequestMarshaller(ProtoUtils.marshaller(FlushRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(FlushRowsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createWriteStreamCallable; + private final BidiStreamingCallable appendRowsCallable; + private final UnaryCallable getWriteStreamCallable; + private final UnaryCallable + finalizeWriteStreamCallable; + private final UnaryCallable + 
batchCommitWriteStreamsCallable; + private final UnaryCallable<FlushRowsRequest, FlushRowsResponse> flushRowsCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryWriteStub create(BigQueryWriteStubSettings settings) + throws IOException { + return new GrpcBigQueryWriteStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryWriteStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryWriteStub(BigQueryWriteStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryWriteStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryWriteStub( + BigQueryWriteStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub(BigQueryWriteStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryWriteCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub( + BigQueryWriteStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings<CreateWriteStreamRequest, WriteStream> createWriteStreamTransportSettings = + GrpcCallSettings.<CreateWriteStreamRequest, WriteStream>newBuilder() + .setMethodDescriptor(createWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings<AppendRowsRequest, AppendRowsResponse> appendRowsTransportSettings = + GrpcCallSettings.<AppendRowsRequest, AppendRowsResponse>newBuilder() + .setMethodDescriptor(appendRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("write_stream", String.valueOf(request.getWriteStream())); + return builder.build(); + }) + .build(); + GrpcCallSettings<GetWriteStreamRequest, WriteStream> getWriteStreamTransportSettings = + GrpcCallSettings.<GetWriteStreamRequest, WriteStream>newBuilder() + .setMethodDescriptor(getWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings<FinalizeWriteStreamRequest, FinalizeWriteStreamResponse> + finalizeWriteStreamTransportSettings = + GrpcCallSettings.<FinalizeWriteStreamRequest, FinalizeWriteStreamResponse>newBuilder() + .setMethodDescriptor(finalizeWriteStreamMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings<BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsTransportSettings = + GrpcCallSettings + .<BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse>newBuilder() + .setMethodDescriptor(batchCommitWriteStreamsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent",
String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings<FlushRowsRequest, FlushRowsResponse> flushRowsTransportSettings = + GrpcCallSettings.<FlushRowsRequest, FlushRowsResponse>newBuilder() + .setMethodDescriptor(flushRowsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("write_stream", String.valueOf(request.getWriteStream())); + return builder.build(); + }) + .build(); + + this.createWriteStreamCallable = + callableFactory.createUnaryCallable( + createWriteStreamTransportSettings, + settings.createWriteStreamSettings(), + clientContext); + this.appendRowsCallable = + callableFactory.createBidiStreamingCallable( + appendRowsTransportSettings, settings.appendRowsSettings(), clientContext); + this.getWriteStreamCallable = + callableFactory.createUnaryCallable( + getWriteStreamTransportSettings, settings.getWriteStreamSettings(), clientContext); + this.finalizeWriteStreamCallable = + callableFactory.createUnaryCallable( + finalizeWriteStreamTransportSettings, + settings.finalizeWriteStreamSettings(), + clientContext); + this.batchCommitWriteStreamsCallable = + callableFactory.createUnaryCallable( + batchCommitWriteStreamsTransportSettings, + settings.batchCommitWriteStreamsSettings(), + clientContext); + this.flushRowsCallable = + callableFactory.createUnaryCallable( + flushRowsTransportSettings, settings.flushRowsSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable<CreateWriteStreamRequest, WriteStream> createWriteStreamCallable() { + return createWriteStreamCallable; + } + + @Override + public BidiStreamingCallable<AppendRowsRequest, AppendRowsResponse> appendRowsCallable() { + return appendRowsCallable; + } + + @Override + public UnaryCallable<GetWriteStreamRequest, WriteStream> getWriteStreamCallable() { + return getWriteStreamCallable; + } + + @Override + public UnaryCallable<FinalizeWriteStreamRequest, FinalizeWriteStreamResponse> + finalizeWriteStreamCallable() { + return finalizeWriteStreamCallable; + } + + @Override + public UnaryCallable<BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsCallable() { + return batchCommitWriteStreamsCallable; + } + + @Override + public UnaryCallable<FlushRowsRequest, FlushRowsResponse> flushRowsCallable() { + return flushRowsCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ApiResultRetryAlgorithm.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ApiResultRetryAlgorithm.java new file mode 100644 index 000000000000..f4c07a0b6146 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ApiResultRetryAlgorithm.java @@ -0,0 +1,89
@@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.ApiException; +import com.google.cloud.bigquery.storage.util.Errors; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadSettings; +import io.grpc.Metadata; +import io.grpc.Status; +import java.time.Duration; + +/** For internal use, public for technical reasons. */ +@InternalApi +public class ApiResultRetryAlgorithm<ResponseT> implements ResultRetryAlgorithm<ResponseT> { + // Duration to sleep if the error is DEADLINE_EXCEEDED. + public static final Duration DEADLINE_SLEEP_DURATION = Duration.ofMillis(1); + + private final BigQueryReadSettings.RetryAttemptListener retryAttemptListener; + + public ApiResultRetryAlgorithm() { + this(null); + } + + public ApiResultRetryAlgorithm(BigQueryReadSettings.RetryAttemptListener retryAttemptListener) { + super(); + this.retryAttemptListener = retryAttemptListener; + } + + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, ResponseT prevResponse, TimedAttemptSettings prevSettings) { + if (prevThrowable != null) { + Status status = Status.fromThrowable(prevThrowable); + Metadata metadata = Status.trailersFromThrowable(prevThrowable); + Errors.IsRetryableStatusResult result = Errors.isRetryableStatus(status, metadata); + if (result.isRetryable) { + // If result.retryDelay isn't null, we know exactly how long we must wait, so both regular + // and randomized delays are the same.
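+ // Otherwise, fall back to the delay carried over from the previous attempt's settings and + // sleep only the near-zero DEADLINE_SLEEP_DURATION before retrying.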
+ Duration retryDelay = result.retryDelay; + Duration randomizedRetryDelay = result.retryDelay; + if (retryDelay == null) { + retryDelay = prevSettings.getRetryDelayDuration(); + randomizedRetryDelay = DEADLINE_SLEEP_DURATION; + } + if (retryAttemptListener != null) { + retryAttemptListener.onRetryAttempt(status, metadata); + } + return TimedAttemptSettings.newBuilder() + .setGlobalSettings(prevSettings.getGlobalSettings()) + .setRetryDelayDuration(retryDelay) + .setRpcTimeout(prevSettings.getRpcTimeout()) + .setRandomizedRetryDelayDuration(randomizedRetryDelay) + .setAttemptCount(prevSettings.getAttemptCount() + 1) + .setFirstAttemptStartTimeNanos(prevSettings.getFirstAttemptStartTimeNanos()) + .build(); + } + } + return null; + } + + @Override + public boolean shouldRetry(Throwable prevThrowable, ResponseT prevResponse) { + if (prevThrowable != null) { + Status status = Status.fromThrowable(prevThrowable); + Metadata metadata = Status.trailersFromThrowable(prevThrowable); + if (Errors.isRetryableStatus(status, metadata).isRetryable) { + return true; + } + } + return (prevThrowable instanceof ApiException) && ((ApiException) prevThrowable).isRetryable(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsAttemptCallable.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsAttemptCallable.java new file mode 100644 index 000000000000..2d9cb1607baf --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsAttemptCallable.java @@ -0,0 +1,326 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.ServerStreamingAttemptException; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StateCheckingResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.common.base.Preconditions; +import java.time.Duration; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import javax.annotation.concurrent.GuardedBy; + +final class ReadRowsAttemptCallable implements Callable<Void> { + private final Object lock = new Object(); + + private final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> innerCallable; + private final StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> resumptionStrategy; + private final ReadRowsRequest initialRequest; + private ApiCallContext context; + private final ResponseObserver<ReadRowsResponse> outerObserver; + + // Start state + private boolean autoFlowControl = true; + private boolean isStarted; + + // Outer state + @GuardedBy("lock") + private Throwable cancellationCause; + + @GuardedBy("lock") + private int pendingRequests; + + private RetryingFuture<Void> outerRetryingFuture; + + // Internal retry state + private int numAttempts; + + @GuardedBy("lock") + private StreamController innerController; + + private boolean seenSuccessSinceLastError; + private SettableApiFuture<Void> innerAttemptFuture; + + ReadRowsAttemptCallable( + ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> innerCallable, + StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> resumptionStrategy, + ReadRowsRequest initialRequest, + ApiCallContext context, + ResponseObserver<ReadRowsResponse> outerObserver) { + this.innerCallable = innerCallable; + this.resumptionStrategy = resumptionStrategy; + this.initialRequest = initialRequest; + this.context = context; + this.outerObserver = outerObserver; + } + + /** Sets the controlling {@link RetryingFuture}. Must be called before {@link #start()}. */ + void setExternalFuture(RetryingFuture<Void> retryingFuture) { + Preconditions.checkState(!isStarted, "Can't change the RetryingFuture once the call has started"); + Preconditions.checkNotNull(retryingFuture, "RetryingFuture can't be null"); + + this.outerRetryingFuture = retryingFuture; + } + + /** + * Starts the initial call. The call is attempted on the caller's thread. Further call attempts + * will be scheduled by the {@link RetryingFuture}. + */ + public void start() { + Preconditions.checkState(!isStarted, "Already started"); + + // Initialize the outer observer + outerObserver.onStart( + new StreamController() { + @Override + public void disableAutoInboundFlowControl() { + Preconditions.checkState( + !isStarted, "Can't disable auto flow control once the stream is started"); + autoFlowControl = false; + } + + @Override + public void request(int count) { + onRequest(count); + } + + @Override + public void cancel() { + onCancel(); + } + }); + + if (autoFlowControl) { + synchronized (lock) { + pendingRequests = Integer.MAX_VALUE; + } + } + isStarted = true; + + // Propagate the totalTimeout as the overall stream deadline.
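+ // The total timeout comes from the retry settings and is installed on the shared call + // context, so it bounds the lifetime of the whole stream across attempts, not one attempt.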
+ Duration totalTimeout = + outerRetryingFuture.getAttemptSettings().getGlobalSettings().getTotalTimeoutDuration(); + + if (totalTimeout != null && context != null) { + context = context.withTimeoutDuration(totalTimeout); + } + + // Call the inner callable + call(); + } + + /** + * Sends the actual RPC. The request being sent will first be transformed by the {@link + * StreamResumptionStrategy}. + * + *
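<p>On the first attempt the initial request is sent as-is; on later attempts the resumption + * strategy rewrites it so the new stream continues from the last row already delivered. + * + *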
<p>
This method expects to be called by one thread at a time. Furthermore, it expects that the + * current RPC has finished before the next time it's called. + */ + @Override + public Void call() { + Preconditions.checkState(isStarted, "Must be started first"); + + ReadRowsRequest request = + (++numAttempts == 1) ? initialRequest : resumptionStrategy.getResumeRequest(initialRequest); + + // Should never happen. onAttemptError will check if ResumptionStrategy can create a resume + // request, which the RetryingFuture/StreamResumptionStrategy should respect. + Preconditions.checkState(request != null, "ResumptionStrategy returned a null request."); + + innerAttemptFuture = SettableApiFuture.create(); + seenSuccessSinceLastError = false; + + ApiCallContext attemptContext = context; + + if (!outerRetryingFuture.getAttemptSettings().getRpcTimeout().isZero()) { + attemptContext = + attemptContext.withStreamWaitTimeout( + outerRetryingFuture.getAttemptSettings().getRpcTimeout()); + } + + attemptContext + .getTracer() + .attemptStarted(outerRetryingFuture.getAttemptSettings().getOverallAttemptCount()); + + innerCallable.call( + request, + new StateCheckingResponseObserver<ReadRowsResponse>() { + @Override + public void onStartImpl(StreamController controller) { + onAttemptStart(controller); + } + + @Override + public void onResponseImpl(ReadRowsResponse response) { + onAttemptResponse(response); + } + + @Override + public void onErrorImpl(Throwable t) { + onAttemptError(t); + } + + @Override + public void onCompleteImpl() { + onAttemptComplete(); + } + }, + attemptContext); + + outerRetryingFuture.setAttemptFuture(innerAttemptFuture); + + return null; + } + + /** + * Called by the inner {@link ServerStreamingCallable} when the call is about to start. This will + * transfer unfinished state from the previous attempt. + * + * @see ResponseObserver#onStart(StreamController) + */ + private void onAttemptStart(StreamController controller) { + if (!autoFlowControl) { + controller.disableAutoInboundFlowControl(); + } + + Throwable localCancellationCause; + int numToRequest = 0; + + synchronized (lock) { + innerController = controller; + + localCancellationCause = this.cancellationCause; + + if (!autoFlowControl) { + numToRequest = pendingRequests; + } + } + + if (localCancellationCause != null) { + controller.cancel(); + } else if (numToRequest > 0) { + controller.request(numToRequest); + } + } + + /** + * Called when the outer {@link ResponseObserver} wants to prematurely cancel the stream. + * + * @see StreamController#cancel() + */ + private void onCancel() { + StreamController localInnerController; + + synchronized (lock) { + if (cancellationCause != null) { + return; + } + // NOTE: BasicRetryingFuture will replace j.u.c.CancellationExceptions with its own, + // which will not have the current stacktrace, so a special wrapper has to be used here. + cancellationCause = + new ServerStreamingAttemptException( + new CancellationException("User cancelled stream"), + resumptionStrategy.canResume(), + seenSuccessSinceLastError); + localInnerController = innerController; + } + + if (localInnerController != null) { + localInnerController.cancel(); + } + } + + /** + * Called when the outer {@link ResponseObserver} is ready for more data.
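+ * + * <p>Requests are accumulated in pendingRequests under the lock so that demand arriving before + * or between attempts can be replayed onto the next attempt's controller in onAttemptStart.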
+ * + * @see StreamController#request(int) + */ + private void onRequest(int count) { + Preconditions.checkState(!autoFlowControl, "Automatic flow control is enabled"); + Preconditions.checkArgument(count > 0, "Count must be > 0"); + + final StreamController localInnerController; + + synchronized (lock) { + int maxInc = Integer.MAX_VALUE - pendingRequests; + count = Math.min(maxInc, count); + + pendingRequests += count; + localInnerController = this.innerController; + } + + // Note: there is a race condition here where the count might go to the previous attempt's + // StreamController after it failed. But it doesn't matter, because the controller will just + // ignore it and the current controller will pick it up onStart. + if (localInnerController != null) { + localInnerController.request(count); + } + } + + /** Called when the inner callable has responses to deliver. */ + private void onAttemptResponse(ReadRowsResponse message) { + if (!autoFlowControl) { + synchronized (lock) { + pendingRequests--; + } + } + // Update local state to allow for future resume. + seenSuccessSinceLastError = true; + message = resumptionStrategy.processResponse(message); + // Notify the outer observer. + outerObserver.onResponse(message); + } + + /** + * Called when the current RPC fails. The error will be bubbled up to the outer {@link + * RetryingFuture} via the {@link #innerAttemptFuture}. + */ + private void onAttemptError(Throwable throwable) { + Throwable localCancellationCause; + synchronized (lock) { + localCancellationCause = cancellationCause; + } + + if (localCancellationCause != null) { + // Take special care to preserve the cancellation's stack trace. + innerAttemptFuture.setException(localCancellationCause); + } else { + // Wrap the original exception and provide more context for StreamingRetryAlgorithm. + innerAttemptFuture.setException( + new ServerStreamingAttemptException( + throwable, resumptionStrategy.canResume(), seenSuccessSinceLastError)); + } + } + + /** + * Called when the current RPC successfully completes. Notifies the outer {@link RetryingFuture} + * via {@link #innerAttemptFuture}. + */ + private void onAttemptComplete() { + innerAttemptFuture.set(null); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsResumptionStrategy.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsResumptionStrategy.java new file mode 100644 index 000000000000..8429c5405355 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsResumptionStrategy.java @@ -0,0 +1,72 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import javax.annotation.Nonnull; + +/** + * An implementation of a {@link StreamResumptionStrategy} for the ReadRows API. This class tracks + * the offset of the last row received and, upon retry, attempts to resume the stream at the next + * offset. + * + *
<p>
This class is considered an internal implementation detail and not meant to be used by + * applications. + */ +@InternalApi +public class ReadRowsResumptionStrategy + implements StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> { + + // Number of rows processed. + private long rowsProcessed = 0; + + @Override + @Nonnull + public StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> createNew() { + return new ReadRowsResumptionStrategy(); + } + + @Override + @Nonnull + public ReadRowsResponse processResponse(ReadRowsResponse response) { + rowsProcessed += response.getRowCount(); + return response; + } + + /** + * {@inheritDoc} + * + *
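<p>For example (illustrative numbers): a request that began at offset 100, after 40 rows have + * been processed, resumes at offset 140. + * + *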
<p>
Given the initial/original request, this implementation generates a request that will yield + * a new stream whose first response would come right after the last response received by + * processResponse. It takes into account the offset from the original request. + */ + @Override + public ReadRowsRequest getResumeRequest(ReadRowsRequest originalRequest) { + ReadRowsRequest.Builder resumeRequestBuilder = originalRequest.toBuilder(); + + resumeRequestBuilder.setOffset(originalRequest.getOffset() + rowsProcessed); + + return resumeRequestBuilder.build(); + } + + @Override + public boolean canResume() { + return true; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryingCallable.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryingCallable.java new file mode 100644 index 000000000000..22e6da21a2a9 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryingCallable.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; + +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; + +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.ScheduledRetryingExecutor; +import com.google.api.gax.retrying.ServerStreamingAttemptException; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; + +public final class ReadRowsRetryingCallable + extends ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> { + + private final ApiCallContext context; + private final ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> innerCallable; + private final ScheduledRetryingExecutor<Void> executor; + private final StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> + resumptionStrategyPrototype; + + public ReadRowsRetryingCallable( + ApiCallContext context, + ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> innerCallable, + ScheduledRetryingExecutor<Void> executor, + StreamResumptionStrategy<ReadRowsRequest, ReadRowsResponse> resumptionStrategyPrototype) { + this.context = context; + this.innerCallable = innerCallable; + this.executor = executor; + this.resumptionStrategyPrototype = resumptionStrategyPrototype; + } + + @Override + public void call( + ReadRowsRequest request, + final ResponseObserver<ReadRowsResponse> responseObserver, + ApiCallContext context) { + ApiCallContext actualContext = this.context.merge(context); + ReadRowsAttemptCallable attemptCallable = + new ReadRowsAttemptCallable( + innerCallable, + resumptionStrategyPrototype.createNew(), + request, + actualContext, + responseObserver); + + RetryingFuture<Void> retryingFuture = executor.createFuture(attemptCallable, actualContext); + attemptCallable.setExternalFuture(retryingFuture); + attemptCallable.start(); + + // Bridge the future result back to the external responseObserver + ApiFutures.addCallback( + retryingFuture, + new ApiFutureCallback<Void>() { + @Override + public void onFailure(Throwable throwable) { + // Make sure to unwrap the underlying ApiException + if (throwable instanceof ServerStreamingAttemptException) { + throwable = throwable.getCause(); + } + responseObserver.onError(throwable); + } + + @Override + public void onSuccess(Void ignored) { + responseObserver.onComplete(); + } + }, + directExecutor()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/package-info.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/package-info.java new file mode 100644 index 000000000000..13ab6cdcbf77 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/package-info.java @@ -0,0 +1,16 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json new file mode 100644 index 000000000000..d25add38a63d --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json @@ -0,0 +1,2198 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest$ArrowData", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest$ArrowData$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest$MissingValueInterpretation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest$ProtoData", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsRequest$ProtoData$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsResponse$AppendResult", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsResponse$AppendResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AppendRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowRecordBatch", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowRecordBatch$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions$CompressionCodec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions$PicosTimestampPrecision", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroRows", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroRows$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroSerializationOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroSerializationOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroSerializationOptions$PicosTimestampPrecision", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.DataFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FlushRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FlushRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FlushRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.FlushRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true 
+ }, + { + "name": "com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ProtoRows", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ProtoRows$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ProtoSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ProtoSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession$TableModifiers", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession$TableModifiers$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession$TableReadOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession$TableReadOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadSession$TableReadOptions$ResponseCompressionCodec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadStream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ReadStream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.RowError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.RowError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.RowError$RowErrorCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StorageError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StorageError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StorageError$StorageErrorCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StreamStats", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StreamStats$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StreamStats$Progress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.StreamStats$Progress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableFieldSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableFieldSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableFieldSchema$FieldElementType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableFieldSchema$FieldElementType$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableFieldSchema$Mode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableFieldSchema$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.TableSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ThrottleState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.ThrottleState$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.WriteStream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.WriteStream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.WriteStream$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.WriteStream$WriteMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1.WriteStreamView", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BoolValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.BoolValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BytesValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BytesValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", 
+ "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DoubleValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DoubleValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FloatValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FloatValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int32Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int32Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int64Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int64Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.StringValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.StringValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt32Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt32Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt64Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt64Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json new file mode 100644 index 000000000000..3ebb7d9d7819 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json @@ -0,0 +1,1577 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true 
+ }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, 
+ "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.FieldSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.FieldSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, 
+ "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.MetastorePartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.MetastorePartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.ReadStream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.ReadStream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.SerDeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.cloud.bigquery.storage.v1alpha.SerDeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StreamList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StreamList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json new file mode 100644 index 000000000000..e5f85989c9f2 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json @@ -0,0 +1,1577 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.FieldSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.FieldSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.MetastorePartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.MetastorePartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.ReadStream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.ReadStream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.SerDeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.SerDeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StorageDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.bigquery.storage.v1beta.StorageDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StreamList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StreamList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + 
{ + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json new file mode 100644 index 000000000000..0e0aeab81aac --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json @@ -0,0 +1,1595 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { 
+ "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + 
}, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, 
+ "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.ArrowProto$ArrowRecordBatch", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.ArrowProto$ArrowRecordBatch$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.ArrowProto$ArrowSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.ArrowProto$ArrowSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.AvroProto$AvroRows", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.AvroProto$AvroRows$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.AvroProto$AvroSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.AvroProto$AvroSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.ReadOptions$TableReadOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.ReadOptions$TableReadOptions$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$BatchCreateReadSessionStreamsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$BatchCreateReadSessionStreamsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$BatchCreateReadSessionStreamsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$BatchCreateReadSessionStreamsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$CreateReadSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$CreateReadSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$DataFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$FinalizeStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$FinalizeStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$Progress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$Progress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.bigquery.storage.v1beta1.Storage$ReadRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ReadRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ReadRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ReadRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ReadSession", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ReadSession$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ShardingStrategy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$SplitReadStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$SplitReadStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$SplitReadStreamResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$SplitReadStreamResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$Stream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.bigquery.storage.v1beta1.Storage$Stream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$StreamPosition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$StreamPosition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$StreamStatus", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$StreamStatus$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ThrottleStatus", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.Storage$ThrottleStatus$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto$TableModifiers", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto$TableModifiers$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto$TableReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto$TableReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json new file mode 100644 index 000000000000..8003317ef072 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json @@ -0,0 +1,2063 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, 
+ "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest$ProtoData", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest$ProtoData$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse$AppendResult", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse$AppendResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions$Format", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AvroRows", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AvroRows$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AvroSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.AvroSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.DataFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ProtoRows", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ProtoRows$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ProtoSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ProtoSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadSession", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadSession$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadSession$TableModifiers", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadSession$TableModifiers$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadSession$TableReadOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadSession$TableReadOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadStream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ReadStream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StorageError", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StorageError$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StorageError$StorageErrorCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StreamStats", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StreamStats$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StreamStats$Progress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.StreamStats$Progress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema$Mode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.TableSchema", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.TableSchema$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ThrottleState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.ThrottleState$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.WriteStream", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.WriteStream$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.bigquery.storage.v1beta2.WriteStream$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BoolValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BoolValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BytesValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.BytesValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { 
+ "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, 
+ "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DoubleValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DoubleValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FloatValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FloatValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int32Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int32Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Int64Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.Int64Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.StringValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.StringValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt32Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt32Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt64Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.UInt64Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json new file mode 100644 index 000000000000..213bb0f577b5 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json @@ -0,0 +1,114 @@ +[ + { + "name":"java.lang.Object", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "methods":[{"name":"","parameterTypes":[] }]}, + { + "name": 
"com.google.rpc.RetryInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.RetryInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name":"org.apache.arrow.memory.BaseAllocator", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.BaseAllocator$Config", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.RootAllocator", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.vector.types.pojo.ArrowType", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true + }, + { + "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", + "allDeclaredFields": true + }, + { + "name": "org.apache.arrow.vector.types.pojo.Field", + "allDeclaredFields": true + }, + { + "name": "org.apache.arrow.vector.types.pojo.Schema", + "allDeclaredFields": true + }, + { + "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", + "fields":[{"name":"refCnt"}] + } +] \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json new file mode 100644 index 000000000000..2b2ee30f5fff --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json @@ -0,0 +1,26 @@ +{ + "resources":{ + "includes":[ + { + "pattern":"\\Qorg/apache/arrow/memory/DefaultAllocationManagerFactory.class\\E" + }, + { + "pattern":"\\Qorg/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class\\E" + }, + { + "pattern":"\\Qorg/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class\\E" + } + ] + }, + "globs":[ + { + "glob": "org/apache/arrow/memory/DefaultAllocationManagerFactory.class" + }, + { + "glob": "org/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class" + }, + { + "glob": "org/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class" + } + ] +} \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java new file mode 100644 index 000000000000..29c7a67b1d29 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.protobuf.Duration; +import com.google.protobuf.Parser; +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import org.junit.jupiter.api.Test; + +class ErrorsTest { + + @Test + void testRetryableInternalForRstErrors() { + assertTrue( + Errors.isRetryableInternalStatus( + Status.INTERNAL.withDescription( + "HTTP/2 error code: INTERNAL_ERROR\nReceived Rst stream"))); + assertTrue( + Errors.isRetryableInternalStatus( + Status.INTERNAL.withDescription( + "RST_STREAM closed stream. HTTP/2 error code: INTERNAL_ERROR"))); + assertTrue(Errors.isRetryableInternalStatus(Status.INTERNAL.withDescription("Rst Stream"))); + } + + @Test + void testNonRetryableInternalError() { + assertFalse(Errors.isRetryableInternalStatus(Status.INTERNAL)); + assertFalse(Errors.isRetryableInternalStatus(Status.INTERNAL.withDescription("Server error."))); + } + + @Test + void testNonRetryableOtherError() { + assertFalse( + Errors.isRetryableInternalStatus( + Status.DATA_LOSS.withDescription( + "RST_STREAM closed stream. 
HTTP/2 error code: INTERNAL_ERROR"))); + } + + @Test + void testIsRetryableStatus() { + Errors.IsRetryableStatusResult result = + Errors.isRetryableStatus( + Status.INTERNAL.withDescription( + "HTTP/2 error code: INTERNAL_ERROR\nReceived Rst stream"), + null); + assertTrue(result.isRetryable); + assertNull(result.retryDelay); + + result = + Errors.isRetryableStatus( + Status.INTERNAL.withDescription( + "RST_STREAM closed stream. HTTP/2 error code: INTERNAL_ERROR"), + null); + assertTrue(result.isRetryable); + assertNull(result.retryDelay); + + Metadata metadata = new Metadata(); + metadata.put( + Metadata.Key.of( + "some-key-bin", + new Metadata.BinaryMarshaller<Integer>() { + @Override + public byte[] toBytes(Integer value) { + return new byte[] {}; + } + + @Override + public Integer parseBytes(byte[] serialized) { + return Integer.valueOf(1); + } + }), + Integer.valueOf(2)); + result = + Errors.isRetryableStatus( + Status.RESOURCE_EXHAUSTED.withDescription("You have run out of X quota"), metadata); + assertFalse(result.isRetryable); + assertNull(result.retryDelay); + + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(Duration.newBuilder().setSeconds(123).setNanos(456).build()) + .build(); + + metadata = new Metadata(); + metadata.put( + Metadata.Key.of( + "google.rpc.retryinfo-bin", + new Metadata.BinaryMarshaller<RetryInfo>() { + @Override + public byte[] toBytes(RetryInfo value) { + return value.toByteArray(); + } + + @Override + public RetryInfo parseBytes(byte[] serialized) { + try { + Parser<RetryInfo> parser = RetryInfo.newBuilder().build().getParserForType(); + return parser.parseFrom(serialized); + } catch (Exception e) { + return null; + } + } + }), + retryInfo); + + result = + Errors.isRetryableStatus( + Status.RESOURCE_EXHAUSTED.withDescription("Stop for a while"), metadata); + assertTrue(result.isRetryable); + assertEquals(java.time.Duration.ofSeconds(123, 456), result.retryDelay); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java new file mode 100644 index 000000000000..daf86f68ea04 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java @@ -0,0 +1,724 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.cloud.bigquery.storage.test.JsonTest; +import com.google.cloud.bigquery.storage.test.SchemaTest; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.DescriptorProtos; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.FieldDescriptor; +import com.google.protobuf.Int64Value; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.Test; + +class BQTableSchemaToProtoDescriptorTest { + // This is a map between the TableFieldSchema.Type and the descriptor it is supposed to + // produce. The produced descriptor will be used to check against the entry values here. + private static Map<TableFieldSchema.Type, Descriptor> BQTableTypeToCorrectProtoDescriptorTest = + new ImmutableMap.Builder<TableFieldSchema.Type, Descriptor>() + .put(TableFieldSchema.Type.BOOL, SchemaTest.BoolType.getDescriptor()) + .put(TableFieldSchema.Type.BYTES, SchemaTest.BytesType.getDescriptor()) + .put(TableFieldSchema.Type.DATE, SchemaTest.Int32Type.getDescriptor()) + .put(TableFieldSchema.Type.DATETIME, SchemaTest.Int64Type.getDescriptor()) + .put(TableFieldSchema.Type.DOUBLE, SchemaTest.DoubleType.getDescriptor()) + .put(TableFieldSchema.Type.GEOGRAPHY, SchemaTest.StringType.getDescriptor()) + .put(TableFieldSchema.Type.INT64, SchemaTest.Int64Type.getDescriptor()) + .put(TableFieldSchema.Type.NUMERIC, SchemaTest.BytesType.getDescriptor()) + .put(TableFieldSchema.Type.STRING, SchemaTest.StringType.getDescriptor()) + .put(TableFieldSchema.Type.TIME, SchemaTest.Int64Type.getDescriptor()) + .put(TableFieldSchema.Type.TIMESTAMP, SchemaTest.Int64Type.getDescriptor()) + .build(); + + // Creates mapping from descriptor to how many times it was reused. + private void mapDescriptorToCount(Descriptor descriptor, HashMap<String, Integer> map) { + for (FieldDescriptor field : descriptor.getFields()) { + if (field.getType() == FieldDescriptor.Type.MESSAGE) { + Descriptor subDescriptor = field.getMessageType(); + String messageName = subDescriptor.getName(); + if (map.containsKey(messageName)) { + map.put(messageName, map.get(messageName) + 1); + } else { + map.put(messageName, 1); + } + mapDescriptorToCount(subDescriptor, map); + } + } + } + + // Checks that two descriptors are the same by checking the fields inside the descriptors. + // Checks that each descriptor has the same number of fields and that each field has the same + // type and mode on the message. If a field is a nested message, then it recursively checks the + // fields inside each nested message. 
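+ // As an illustrative sketch (not asserted anywhere in this class): a NULLABLE STRING column + // named "test_field_type" is expected to convert to a proto field roughly equivalent to + // "optional string test_field_type = 1;", which is why the comparison below only inspects + // names, types, and modes rather than field numbers or options.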
+ private void assertDescriptorsAreEqual(Descriptor expected, Descriptor actual) { + // Check same number of fields + assertEquals(expected.getFields().size(), actual.getFields().size()); + for (FieldDescriptor convertedField : actual.getFields()) { + // Check field name + FieldDescriptor expectedField = expected.findFieldByName(convertedField.getName()); + assertNotNull(expectedField); + // Check type + FieldDescriptor.Type convertedType = convertedField.getType(); + FieldDescriptor.Type originalType = expectedField.getType(); + assertEquals(originalType, convertedType, convertedField.getName()); + // Check mode + assertEquals(expectedField.isRepeated(), convertedField.isRepeated()); + assertEquals(expectedField.isRequired(), convertedField.isRequired()); + assertEquals(expectedField.isOptional(), convertedField.isOptional()); + // Recursively check nested messages + if (convertedType == FieldDescriptor.Type.MESSAGE) { + assertDescriptorsAreEqual(expectedField.getMessageType(), convertedField.getMessageType()); + } + } + } + + @Test + void testSimpleTypes() throws Exception { + for (Map.Entry<TableFieldSchema.Type, Descriptor> entry : + BQTableTypeToCorrectProtoDescriptorTest.entrySet()) { + final TableFieldSchema tableFieldSchema = + TableFieldSchema.newBuilder() + .setType(entry.getKey()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_field_type") + .build(); + final TableSchema tableSchema = + TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(entry.getValue(), descriptor); + } + } + + // BQ Timestamp field with higher precision (12) is mapped to a String protobuf type (not int64) + @Test + void testTimestampType_higherTimestampPrecision() + throws Descriptors.DescriptorValidationException { + TableFieldSchema tableFieldSchema = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .setName("test_field_type") + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); + Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(SchemaTest.StringType.getDescriptor(), descriptor); + } + + @Test + void testRange() throws Exception { + final TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder() + .setName("range_date") + .setType(TableFieldSchema.Type.RANGE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .build()) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_datetime") + .setType(TableFieldSchema.Type.RANGE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .build()) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_timestamp") + .setType(TableFieldSchema.Type.RANGE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_date_miXEd_caSE") + .setType(TableFieldSchema.Type.RANGE) + 
.setMode(TableFieldSchema.Mode.NULLABLE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .build()) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_datetime_miXEd_caSE") + .setType(TableFieldSchema.Type.RANGE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .build()) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_timestamp_miXEd_caSE") + .setType(TableFieldSchema.Type.RANGE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .build()) + .build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(JsonTest.TestRange.getDescriptor(), descriptor); + } + + @Test + void testStructSimple() throws Exception { + final TableFieldSchema stringType = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_field_type") + .build(); + final TableFieldSchema tableFieldSchema = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_field_type") + .addFields(0, stringType) + .build(); + final TableSchema tableSchema = TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(SchemaTest.MessageType.getDescriptor(), descriptor); + } + + @Test + void testStructComplex() throws Exception { + final TableFieldSchema test_int = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_int") + .build(); + final TableFieldSchema test_string = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_string") + .build(); + final TableFieldSchema test_bytes = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("test_bytes") + .build(); + final TableFieldSchema test_bool = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BOOL) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bool") + .build(); + final TableFieldSchema test_double = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DOUBLE) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_double") + .build(); + final TableFieldSchema test_date = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("test_date") + .build(); + final TableFieldSchema test_datetime = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_datetime") + .build(); + final TableFieldSchema test_datetime_str = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_datetime_str") + .build(); + final TableFieldSchema ComplexLvl2 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.REQUIRED) + 
.addFields(0, test_int) + .setName("complex_lvl2") + .build(); + final TableFieldSchema ComplexLvl1 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.REQUIRED) + .addFields(0, test_int) + .addFields(1, ComplexLvl2) + .setName("complex_lvl1") + .build(); + final TableFieldSchema TEST_NUMERIC = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric") + .build(); + final TableFieldSchema TEST_GEO = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.GEOGRAPHY) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_geo") + .build(); + final TableFieldSchema TEST_TIMESTAMP = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_timestamp") + .build(); + final TableFieldSchema TEST_TIME = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_time") + .build(); + final TableFieldSchema TEST_TIME_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_time_str") + .build(); + final TableFieldSchema TEST_NUMERIC_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_numeric_repeated") + .build(); + final TableFieldSchema TEST_NUMERIC_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_str") + .build(); + final TableFieldSchema TEST_NUMERIC_SHORT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_short") + .build(); + final TableFieldSchema TEST_NUMERIC_INT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_int") + .build(); + final TableFieldSchema TEST_NUMERIC_LONG = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_long") + .build(); + final TableFieldSchema TEST_NUMERIC_FLOAT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_float") + .build(); + final TableFieldSchema TEST_NUMERIC_DOUBLE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_double") + .build(); + final TableFieldSchema TEST_BIGNUMERIC = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric") + .build(); + final TableFieldSchema TEST_BIGNUMERIC_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_bignumeric_str") + .build(); + final TableFieldSchema TEST_BIGNUMERIC_SHORT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_short") + .build(); + final TableFieldSchema TEST_BIGNUMERIC_INT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + 
.setName("test_bignumeric_int") + .build(); + final TableFieldSchema TEST_BIGNUMERIC_LONG = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_long") + .build(); + final TableFieldSchema TEST_BIGNUMERIC_FLOAT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_float") + .build(); + final TableFieldSchema TEST_BIGNUMERIC_DOUBLE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_double") + .build(); + final TableFieldSchema TEST_INTERVAL = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INTERVAL) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_interval") + .build(); + final TableFieldSchema TEST_JSON = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.JSON) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_json") + .build(); + final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_timestamp_higher_precision") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); + final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_higher_precision_repeated") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); + final TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, test_int) + .addFields(1, test_string) + .addFields(2, test_bytes) + .addFields(3, test_bool) + .addFields(4, test_double) + .addFields(5, test_date) + .addFields(6, test_datetime) + .addFields(7, test_datetime_str) + .addFields(8, ComplexLvl1) + .addFields(9, ComplexLvl2) + .addFields(10, TEST_NUMERIC) + .addFields(11, TEST_GEO) + .addFields(12, TEST_TIMESTAMP) + .addFields(13, TEST_TIME) + .addFields(14, TEST_TIME_STR) + .addFields(15, TEST_NUMERIC_REPEATED) + .addFields(16, TEST_NUMERIC_STR) + .addFields(17, TEST_NUMERIC_SHORT) + .addFields(18, TEST_NUMERIC_INT) + .addFields(19, TEST_NUMERIC_LONG) + .addFields(20, TEST_NUMERIC_FLOAT) + .addFields(21, TEST_NUMERIC_DOUBLE) + .addFields(22, TEST_BIGNUMERIC) + .addFields(23, TEST_BIGNUMERIC_STR) + .addFields(24, TEST_BIGNUMERIC_SHORT) + .addFields(25, TEST_BIGNUMERIC_INT) + .addFields(26, TEST_BIGNUMERIC_LONG) + .addFields(27, TEST_BIGNUMERIC_FLOAT) + .addFields(28, TEST_BIGNUMERIC_DOUBLE) + .addFields(29, TEST_INTERVAL) + .addFields(30, TEST_JSON) + .addFields(31, TEST_TIMESTAMP_HIGHER_PRECISION) + .addFields(32, TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(JsonTest.ComplexRoot.getDescriptor(), descriptor); + } + + @Test + void testCasingComplexStruct() throws Exception { + final TableFieldSchema required = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("tEsT_ReQuIrEd") + .build(); + final TableFieldSchema repeated = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.REPEATED) + 
.setName("tESt_repEATed") + .build(); + final TableFieldSchema optional = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_opTIONal") + .build(); + final TableFieldSchema test_int = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("TEST_INT") + .build(); + final TableFieldSchema test_string = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("TEST_STRING") + .build(); + final TableFieldSchema test_bytes = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("TEST_BYTES") + .build(); + final TableFieldSchema test_bool = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BOOL) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("TEST_BOOL") + .build(); + final TableFieldSchema test_double = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DOUBLE) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("TEST_DOUBLE") + .build(); + final TableFieldSchema test_date = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("TEST_DATE") + .build(); + final TableFieldSchema option_test = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.REQUIRED) + .addFields(0, required) + .addFields(1, repeated) + .addFields(2, optional) + .setName("option_test") + .build(); + final TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, test_int) + .addFields(1, test_string) + .addFields(2, test_bytes) + .addFields(3, test_bool) + .addFields(4, test_double) + .addFields(5, test_date) + .addFields(6, option_test) + .build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(JsonTest.CasingComplex.getDescriptor(), descriptor); + } + + @Test + void testOptions() throws Exception { + final TableFieldSchema required = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("test_required") + .build(); + final TableFieldSchema repeated = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_repeated") + .build(); + final TableFieldSchema optional = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_optional") + .build(); + final TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, required) + .addFields(1, repeated) + .addFields(2, optional) + .build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(JsonTest.OptionTest.getDescriptor(), descriptor); + } + + @Test + void testDescriptorReuseDuringCreation() throws Exception { + final TableFieldSchema test_int = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_int") + .build(); + final TableFieldSchema reuse_lvl2 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("reuse_lvl2") + .addFields(0, test_int) + .build(); 
+ final TableFieldSchema reuse_lvl1 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("reuse_lvl1") + .addFields(0, test_int) + .addFields(0, reuse_lvl2) + .build(); + final TableFieldSchema reuse_lvl1_1 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("reuse_lvl1_1") + .addFields(0, test_int) + .addFields(0, reuse_lvl2) + .build(); + final TableFieldSchema reuse_lvl1_2 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("reuse_lvl1_2") + .addFields(0, test_int) + .addFields(0, reuse_lvl2) + .build(); + final TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, reuse_lvl1) + .addFields(1, reuse_lvl1_1) + .addFields(2, reuse_lvl1_2) + .build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + HashMap<String, Integer> descriptorToCount = new HashMap<>(); + mapDescriptorToCount(descriptor, descriptorToCount); + assertEquals(2, descriptorToCount.size()); + assertTrue(descriptorToCount.containsKey("root__reuse_lvl1")); + assertEquals(3, descriptorToCount.get("root__reuse_lvl1").intValue()); + assertTrue(descriptorToCount.containsKey("root__reuse_lvl1__reuse_lvl2")); + assertEquals(3, descriptorToCount.get("root__reuse_lvl1__reuse_lvl2").intValue()); + assertDescriptorsAreEqual(JsonTest.ReuseRoot.getDescriptor(), descriptor); + } + + @Test + void testNestedFlexibleFieldName() throws Exception { + final TableFieldSchema stringField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("str-列") + .build(); + final TableFieldSchema intField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("int-列") + .build(); + final TableFieldSchema nestedField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("nested-列") + .addFields(0, intField) + .build(); + final TableSchema tableSchema = + TableSchema.newBuilder().addFields(0, stringField).addFields(1, nestedField).build(); + final Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(SchemaTest.TestNestedFlexibleFieldName.getDescriptor(), descriptor); + } + + @Test + void timestampField_defaultPrecision() throws Exception { + TableFieldSchema timestampField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto.getType()); + } + + @Test + void timestampField_picosecondPrecision() throws Exception { + TableFieldSchema timestampField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField, 0, null); + assertEquals( + 
DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, fieldDescriptorProto.getType()); + } + + @Test + void timestampField_unexpectedPrecision() throws Exception { + TableFieldSchema timestampField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(13).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto.getType()); + + TableFieldSchema timestampField1 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(7).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto1 = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField1, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto1.getType()); + + TableFieldSchema timestampField2 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(-1).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto2 = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField2, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto2.getType()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java new file mode 100644 index 000000000000..8ffcc57baba2 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java @@ -0,0 +1,294 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class BaseBigQueryReadClientTest { + private static MockBigQueryRead mockBigQueryRead; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private BaseBigQueryReadClient client; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryRead = new MockBigQueryRead(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryRead)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + BaseBigQueryReadSettings settings = + BaseBigQueryReadSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BaseBigQueryReadClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createReadSessionTest() throws Exception { + ReadSession expectedResponse = + ReadSession.newBuilder() + .setName("name3373707") + .setExpireTime(Timestamp.newBuilder().build()) + .setDataFormat(DataFormat.forNumber(0)) + .setTable(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .setTableModifiers(ReadSession.TableModifiers.newBuilder().build()) + .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) + .addAllStreams(new ArrayList()) + .setEstimatedTotalBytesScanned(452788190) + .setEstimatedTotalPhysicalFileSize(938325754) + .setEstimatedRowCount(-1745583577) + .setTraceId("traceId-1067401920") + .build(); + mockBigQueryRead.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = ((CreateReadSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(readSession, 
actualRequest.getReadSession()); + Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + client.createReadSession(parent, readSession, maxStreamCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createReadSessionTest2() throws Exception { + ReadSession expectedResponse = + ReadSession.newBuilder() + .setName("name3373707") + .setExpireTime(Timestamp.newBuilder().build()) + .setDataFormat(DataFormat.forNumber(0)) + .setTable(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .setTableModifiers(ReadSession.TableModifiers.newBuilder().build()) + .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) + .addAllStreams(new ArrayList()) + .setEstimatedTotalBytesScanned(452788190) + .setEstimatedTotalPhysicalFileSize(938325754) + .setEstimatedRowCount(-1745583577) + .setTraceId("traceId-1067401920") + .build(); + mockBigQueryRead.addResponse(expectedResponse); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = ((CreateReadSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(readSession, actualRequest.getReadSession()); + Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createReadSessionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + client.createReadSession(parent, readSession, maxStreamCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
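+ // (The mock service returned gRPC INVALID_ARGUMENT, which GAX surfaces to the caller as an + // InvalidArgumentException, so reaching this catch block is the expected outcome.)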
+ } + } + + @Test + public void readRowsTest() throws Exception { + ReadRowsResponse expectedResponse = + ReadRowsResponse.newBuilder() + .setRowCount(1340416618) + .setStats(StreamStats.newBuilder().build()) + .setThrottleState(ThrottleState.newBuilder().build()) + .setUncompressedByteSize(-2094376525) + .build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = + ReadRowsRequest.newBuilder() + .setReadStream( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setOffset(-1019779949) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + ReadRowsRequest request = + ReadRowsRequest.newBuilder() + .setReadStream( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setOffset(-1019779949) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void splitReadStreamTest() throws Exception { + SplitReadStreamResponse expectedResponse = + SplitReadStreamResponse.newBuilder() + .setPrimaryStream(ReadStream.newBuilder().build()) + .setRemainderStream(ReadStream.newBuilder().build()) + .build(); + mockBigQueryRead.addResponse(expectedResponse); + + SplitReadStreamRequest request = + SplitReadStreamRequest.newBuilder() + .setName( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setFraction(-1653751294) + .build(); + + SplitReadStreamResponse actualResponse = client.splitReadStream(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SplitReadStreamRequest actualRequest = ((SplitReadStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getName(), actualRequest.getName()); + Assert.assertEquals(request.getFraction(), actualRequest.getFraction(), 0.0001); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void splitReadStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + SplitReadStreamRequest request = + SplitReadStreamRequest.newBuilder() + .setName( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + 
.setFraction(-1653751294) + .build(); + client.splitReadStream(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java new file mode 100644 index 000000000000..cd53248b635a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java @@ -0,0 +1,337 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.InternalException; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ResourceExhaustedException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Duration; +import com.google.protobuf.Parser; +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class BigQueryReadClientTest { + private static MockBigQueryRead mockBigQueryRead; + private static MockServiceHelper serviceHelper; + private BigQueryReadClient client; + private LocalChannelProvider channelProvider; + private int retryCount; + private Code lastRetryStatusCode; + + @BeforeAll + static void startStaticServer() { + mockBigQueryRead = new MockBigQueryRead(); + serviceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), 
Arrays.asList(mockBigQueryRead)); + serviceHelper.start(); + } + + @AfterAll + static void stopServer() { + serviceHelper.stop(); + } + + @BeforeEach + void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + retryCount = 0; + lastRetryStatusCode = Code.OK; + BigQueryReadSettings settings = + BigQueryReadSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setReadRowsRetryAttemptListener( + new BigQueryReadSettings.RetryAttemptListener() { + @Override + public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) { + synchronized (this) { + retryCount += 1; + lastRetryStatusCode = prevStatus.getCode(); + } + } + }) + .build(); + client = BigQueryReadClient.create(settings); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + @Test + @SuppressWarnings("all") + void createReadSessionTest() { + String name = "name3373707"; + String table = "table110115790"; + ReadSession expectedResponse = ReadSession.newBuilder().setName(name).setTable(table).build(); + mockBigQueryRead.addResponse(expectedResponse); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); + + assertEquals(parent, actualRequest.getParent()); + + assertEquals(readSession, actualRequest.getReadSession()); + + assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + + assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + assertThrows( + InvalidArgumentException.class, + () -> client.createReadSession(parent, readSession, maxStreamCount)); + } + + @Test + @SuppressWarnings("all") + void readRowsTest() throws Exception { + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + assertEquals(expectedResponse, actualResponses.get(0)); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); + } + + @Test + @SuppressWarnings("all") + void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + 
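+ // Note: the mock replays queued responses and exceptions in FIFO order, so the
+ // exception queued above is what the streaming call below will observe.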
ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); + } + + @Test + @SuppressWarnings("all") + void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "Received unexpected EOS on DATA frame from server")), + GrpcStatusCode.of(Code.INTERNAL), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); + } + + @Test + @SuppressWarnings("all") + void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "HTTP/2 error code: INTERNAL_ERROR\nReceived Rst Stream")), + GrpcStatusCode.of(Code.INTERNAL), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); + } + + @Test + @SuppressWarnings("all") + void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() + throws ExecutionException, InterruptedException { + ApiException exception = + new ResourceExhaustedException( + new StatusRuntimeException( + Status.RESOURCE_EXHAUSTED.withDescription("You are out of quota X")), + GrpcStatusCode.of(Code.RESOURCE_EXHAUSTED), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable 
callable = client.readRowsCallable();
+ callable.serverStreamingCall(request, responseObserver);
+
+ ExecutionException e =
+ assertThrows(ExecutionException.class, () -> responseObserver.future().get());
+ assertTrue(e.getCause() instanceof ResourceExhaustedException);
+ ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause();
+ assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode());
+
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
+ }
+
+ @Test
+ @SuppressWarnings("all")
+ void readRowsRetryForResourceExhaustedWithRetryInfo()
+ throws ExecutionException, InterruptedException {
+ RetryInfo retryInfo =
+ RetryInfo.newBuilder()
+ .setRetryDelay(Duration.newBuilder().setSeconds(123).setNanos(456).build())
+ .build();
+
+ Metadata metadata = new Metadata();
+ metadata.put(
+ Metadata.Key.of(
+ "google.rpc.retryinfo-bin",
+ new Metadata.BinaryMarshaller<RetryInfo>() {
+ @Override
+ public byte[] toBytes(RetryInfo value) {
+ return value.toByteArray();
+ }
+
+ @Override
+ public RetryInfo parseBytes(byte[] serialized) {
+ try {
+ Parser<RetryInfo> parser = RetryInfo.newBuilder().build().getParserForType();
+ return parser.parseFrom(serialized);
+ } catch (Exception e) {
+ return null;
+ }
+ }
+ }),
+ retryInfo);
+
+ ApiException exception =
+ new ResourceExhaustedException(
+ new StatusRuntimeException(
+ Status.RESOURCE_EXHAUSTED.withDescription("Try again in a bit"), metadata),
+ GrpcStatusCode.of(Code.RESOURCE_EXHAUSTED),
+ /* retryable= */ false);
+ mockBigQueryRead.addException(exception);
+ long rowCount = 1340416618L;
+ ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build();
+ mockBigQueryRead.addResponse(expectedResponse);
+ ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
+
+ MockStreamObserver<ReadRowsResponse> responseObserver = new MockStreamObserver<>();
+
+ ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
+ callable.serverStreamingCall(request, responseObserver);
+ List<ReadRowsResponse> actualResponses = responseObserver.future().get();
+ assertEquals(1, actualResponses.size());
+
+ assertEquals(1, retryCount);
+ assertEquals(Code.RESOURCE_EXHAUSTED, lastRetryStatusCode);
+ }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java
new file mode 100644
index 000000000000..64c3c1962336
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.cloud.bigquery.storage.test.SchemaTest.SupportedTypes; +import com.google.cloud.bigquery.storage.test.SchemaTest.TestNestedFlexibleFieldName; +import com.google.protobuf.Descriptors.Descriptor; +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.api.Test; + +class BigQuerySchemaUtilTest { + + @Test + void testIsProtoCompatible() { + List protoCompatibleNames = Arrays.asList("col_1", "name", "_0_"); + List protoIncompatibleNames = Arrays.asList("0_col", "()", "列", "a-1"); + protoCompatibleNames.stream() + .forEach( + name -> { + assertTrue(BigQuerySchemaUtil.isProtoCompatible(name)); + }); + protoIncompatibleNames.stream() + .forEach( + name -> { + assertFalse(BigQuerySchemaUtil.isProtoCompatible(name)); + }); + } + + @Test + void testGeneratePlaceholderFieldName() { + assertEquals("col_c3RyLeWIlw", BigQuerySchemaUtil.generatePlaceholderFieldName("str-列")); + // Base64 url encodes "~/~/" to "fi9-Lw", we replaced - with _ to be proto compatible. + assertEquals("col_fi9_Lw", BigQuerySchemaUtil.generatePlaceholderFieldName("~/~/")); + } + + @Test + void testGetFieldName() { + // Test get name from annotations. + Descriptor flexibleDescriptor = TestNestedFlexibleFieldName.getDescriptor(); + assertEquals("str-列", BigQuerySchemaUtil.getFieldName(flexibleDescriptor.getFields().get(0))); + assertEquals( + "nested-列", BigQuerySchemaUtil.getFieldName(flexibleDescriptor.getFields().get(1))); + + // Test get name without annotations. + Descriptor descriptor = SupportedTypes.getDescriptor(); + assertEquals("int32_value", BigQuerySchemaUtil.getFieldName(descriptor.getFields().get(0))); + assertEquals("int64_value", BigQuerySchemaUtil.getFieldName(descriptor.getFields().get(1))); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java new file mode 100644 index 000000000000..005b8b0ab0dd --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java @@ -0,0 +1,553 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class BigQueryWriteClientTest { + private static MockBigQueryWrite mockBigQueryWrite; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private BigQueryWriteClient client; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryWrite = new MockBigQueryWrite(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + BigQueryWriteSettings settings = + BigQueryWriteSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BigQueryWriteClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createWriteStreamTest() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .setLocation("location1901043637") + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + WriteStream writeStream = WriteStream.newBuilder().build(); + + WriteStream actualResponse = client.createWriteStream(parent, writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateWriteStreamRequest actualRequest = ((CreateWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + 
GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + WriteStream writeStream = WriteStream.newBuilder().build(); + client.createWriteStream(parent, writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createWriteStreamTest2() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .setLocation("location1901043637") + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String parent = "parent-995424086"; + WriteStream writeStream = WriteStream.newBuilder().build(); + + WriteStream actualResponse = client.createWriteStream(parent, writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateWriteStreamRequest actualRequest = ((CreateWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String parent = "parent-995424086"; + WriteStream writeStream = WriteStream.newBuilder().build(); + client.createWriteStream(parent, writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
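+ // A more compact, equivalent phrasing (the style the JUnit 5 tests elsewhere in
+ // this change use) would be, as a sketch:
+ //   assertThrows(
+ //       InvalidArgumentException.class,
+ //       () -> client.createWriteStream(parent, writeStream));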
+ } + } + + @Test + public void appendRowsTest() throws Exception { + AppendRowsResponse expectedResponse = + AppendRowsResponse.newBuilder() + .setUpdatedSchema(TableSchema.newBuilder().build()) + .addAllRowErrors(new ArrayList()) + .setWriteStream("writeStream1412231231") + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + AppendRowsRequest request = + AppendRowsRequest.newBuilder() + .setWriteStream( + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setOffset(Int64Value.newBuilder().build()) + .setTraceId("traceId-1067401920") + .putAllMissingValueInterpretations( + new HashMap()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void appendRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + AppendRowsRequest request = + AppendRowsRequest.newBuilder() + .setWriteStream( + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setOffset(Int64Value.newBuilder().build()) + .setTraceId("traceId-1067401920") + .putAllMissingValueInterpretations( + new HashMap()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getWriteStreamTest() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .setLocation("location1901043637") + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + WriteStream actualResponse = client.getWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetWriteStreamRequest actualRequest = ((GetWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new 
StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.getWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getWriteStreamTest2() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .setLocation("location1901043637") + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String name = "name3373707"; + + WriteStream actualResponse = client.getWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetWriteStreamRequest actualRequest = ((GetWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String name = "name3373707"; + client.getWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void finalizeWriteStreamTest() throws Exception { + FinalizeWriteStreamResponse expectedResponse = + FinalizeWriteStreamResponse.newBuilder().setRowCount(1340416618).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FinalizeWriteStreamRequest actualRequest = ((FinalizeWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.finalizeWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
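+ // (The happy-path tests above also assert channelProvider.isHeaderSent(...) to
+ // verify the x-goog-api-client header is stamped on each call; the generated
+ // exception tests simply omit that check.)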
+ } + } + + @Test + public void finalizeWriteStreamTest2() throws Exception { + FinalizeWriteStreamResponse expectedResponse = + FinalizeWriteStreamResponse.newBuilder().setRowCount(1340416618).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String name = "name3373707"; + + FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FinalizeWriteStreamRequest actualRequest = ((FinalizeWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String name = "name3373707"; + client.finalizeWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void batchCommitWriteStreamsTest() throws Exception { + BatchCommitWriteStreamsResponse expectedResponse = + BatchCommitWriteStreamsResponse.newBuilder() + .setCommitTime(Timestamp.newBuilder().build()) + .addAllStreamErrors(new ArrayList()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + + BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCommitWriteStreamsRequest actualRequest = + ((BatchCommitWriteStreamsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCommitWriteStreamsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + client.batchCommitWriteStreams(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCommitWriteStreamsTest2() throws Exception { + BatchCommitWriteStreamsResponse expectedResponse = + BatchCommitWriteStreamsResponse.newBuilder() + .setCommitTime(Timestamp.newBuilder().build()) + .addAllStreamErrors(new ArrayList()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCommitWriteStreamsRequest actualRequest = + ((BatchCommitWriteStreamsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCommitWriteStreamsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String parent = "parent-995424086"; + client.batchCommitWriteStreams(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void flushRowsTest() throws Exception { + FlushRowsResponse expectedResponse = + FlushRowsResponse.newBuilder().setOffset(-1019779949).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName writeStream = + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + FlushRowsResponse actualResponse = client.flushRows(writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FlushRowsRequest actualRequest = ((FlushRowsRequest) actualRequests.get(0)); + + Assert.assertEquals(writeStream.toString(), actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void flushRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName writeStream = + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.flushRows(writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
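+ // Each RPC is exercised twice ("...Test" and "...Test2") because the generated
+ // client offers two overloads: one taking a typed resource name and one taking
+ // its raw string form. Sketch of the two call shapes:
+ //   client.flushRows(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"));
+ //   client.flushRows("writeStream1412231231");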
+ } + } + + @Test + public void flushRowsTest2() throws Exception { + FlushRowsResponse expectedResponse = + FlushRowsResponse.newBuilder().setOffset(-1019779949).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String writeStream = "writeStream1412231231"; + + FlushRowsResponse actualResponse = client.flushRows(writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FlushRowsRequest actualRequest = ((FlushRowsRequest) actualRequests.get(0)); + + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void flushRowsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String writeStream = "writeStream1412231231"; + client.flushRows(writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java new file mode 100644 index 000000000000..90b576d7c3da --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java @@ -0,0 +1,572 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.gax.batching.FlowController;
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.api.gax.grpc.testing.MockServiceHelper;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.bigquery.storage.test.Test.FooType;
+import com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool.Settings;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.google.protobuf.DescriptorProtos;
+import com.google.protobuf.Int64Value;
+import io.grpc.Status;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.parallel.Execution;
+import org.junit.jupiter.api.parallel.ExecutionMode;
+
+@Execution(ExecutionMode.SAME_THREAD)
+class ConnectionWorkerPoolTest {
+
+ private FakeBigQueryWrite testBigQueryWrite;
+ private FakeScheduledExecutorService fakeExecutor;
+ private static MockServiceHelper serviceHelper;
+ private BigQueryWriteSettings clientSettings;
+
+ private static final String TEST_TRACE_ID = "DATAFLOW:job_id";
+ private static final String TEST_STREAM_1 = "projects/p1/datasets/d1/tables/t1/streams/_default";
+ private static final String TEST_STREAM_2 = "projects/p1/datasets/d1/tables/t2/streams/_default";
+ private static final int MAX_RETRY_NUM_ATTEMPTS = 3;
+ private static final long INITIAL_RETRY_MILLIS = 500;
+ private static final double RETRY_MULTIPLIER = 1.3;
+ private static final int MAX_RETRY_DELAY_MINUTES = 5;
+ private static final RetrySettings retrySettings =
+ RetrySettings.newBuilder()
+ .setInitialRetryDelayDuration(Duration.ofMillis(INITIAL_RETRY_MILLIS))
+ .setRetryDelayMultiplier(RETRY_MULTIPLIER)
+ .setMaxAttempts(MAX_RETRY_NUM_ATTEMPTS)
+ .setMaxRetryDelay(org.threeten.bp.Duration.ofMinutes(MAX_RETRY_DELAY_MINUTES))
+ .build();
+
+ @BeforeEach
+ void setUp() throws Exception {
+ testBigQueryWrite = new FakeBigQueryWrite();
+ serviceHelper =
+ new MockServiceHelper(
+ UUID.randomUUID().toString(), Arrays.<MockGrpcService>asList(testBigQueryWrite));
+ serviceHelper.start();
+ fakeExecutor = new FakeScheduledExecutorService();
+ testBigQueryWrite.setExecutor(fakeExecutor);
+ clientSettings =
+ BigQueryWriteSettings.newBuilder()
+ .setCredentialsProvider(NoCredentialsProvider.create())
+ .setTransportChannelProvider(serviceHelper.createChannelProvider())
+ .build();
+ ConnectionWorker.Load.setOverwhelmedCountsThreshold(0.5);
+ ConnectionWorker.Load.setOverwhelmedBytesThreshold(0.6);
+ }
+
+ @Test
+ void testSingleTableConnection_noOverwhelmedConnection() throws Exception {
+ // Set the max requests count to a large value so we will not scale up.
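+ // With the thresholds from setUp (0.5 of request capacity, 0.6 of byte capacity),
+ // maxRequests=100000 means a connection only counts as overwhelmed past 50000
+ // in-flight requests, which 100 appends never approach.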
+ testSendRequestsToMultiTable(
+ /* requestToSend= */ 100,
+ /* maxRequests= */ 100000,
+ /* maxConnections= */ 8,
+ /* expectedConnectionCount= */ 1,
+ /* tableCount= */ 1);
+ }
+
+ @Test
+ void testSingleTableConnections_overwhelmed() throws Exception {
+ // A connection is considered overwhelmed when the in-flight request count reaches 5 (max 10).
+ testSendRequestsToMultiTable(
+ /* requestToSend= */ 100,
+ /* maxRequests= */ 10,
+ /* maxConnections= */ 8,
+ /* expectedConnectionCount= */ 8,
+ /* tableCount= */ 1);
+ }
+
+ @Test
+ void testMultiTableConnection_noOverwhelmedConnection() throws Exception {
+ // Set the max requests count to a large value so we will not scale up.
+ // All tables will share two connections (2 because we set the min connections to 2).
+ testSendRequestsToMultiTable(
+ /* requestToSend= */ 100,
+ /* maxRequests= */ 100000,
+ /* maxConnections= */ 8,
+ /* expectedConnectionCount= */ 2,
+ /* tableCount= */ 4);
+ }
+
+ @Test
+ void testMultiTableConnections_overwhelmed_reachingMaximum() throws Exception {
+ // A connection is considered overwhelmed when the in-flight request count reaches 5 (max 10).
+ testSendRequestsToMultiTable(
+ /* requestToSend= */ 100,
+ /* maxRequests= */ 10,
+ /* maxConnections= */ 8,
+ /* expectedConnectionCount= */ 8,
+ /* tableCount= */ 4);
+ }
+
+ @Test
+ void testMultiTableConnections_overwhelmed_overTotalLimit() throws Exception {
+ // A connection is considered overwhelmed when the in-flight request count reaches 5 (max 10).
+ testSendRequestsToMultiTable(
+ /* requestToSend= */ 200,
+ /* maxRequests= */ 10,
+ /* maxConnections= */ 8,
+ /* expectedConnectionCount= */ 8,
+ /* tableCount= */ 10);
+ }
+
+ @Test
+ void testMultiTableConnections_overwhelmed_notReachingMaximum() throws Exception {
+ // A connection is considered overwhelmed when the in-flight request count reaches 5 (max 10).
+ testSendRequestsToMultiTable(
+ /* requestToSend= */ 20,
+ /* maxRequests= */ 10,
+ /* maxConnections= */ 8,
+ /* expectedConnectionCount= */ 4,
+ /* tableCount= */ 4);
+ }
+
+ private void testSendRequestsToMultiTable(
+ int requestToSend,
+ int maxRequests,
+ int maxConnections,
+ int expectedConnectionCount,
+ int tableCount)
+ throws IOException, ExecutionException, InterruptedException {
+ ConnectionWorkerPool.setOptions(
+ Settings.builder()
+ .setMinConnectionsPerRegion(2)
+ .setMaxConnectionsPerRegion(maxConnections)
+ .build());
+ ConnectionWorkerPool connectionWorkerPool =
+ createConnectionWorkerPool(
+ maxRequests, /* maxBytes= */ 100000, java.time.Duration.ofSeconds(5));
+
+ // Sets the sleep time to simulate requests stuck in connection.
+ testBigQueryWrite.setResponseSleep(Duration.ofMillis(50L));
+
+ // Queue one response per request we are about to append.
+ long appendCount = requestToSend;
+ for (long i = 0; i < appendCount; i++) {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+
+ // Create one stream writer per table.
+ List<StreamWriter> streamWriterList = new ArrayList<>();
+ for (int i = 0; i < tableCount; i++) {
+ streamWriterList.add(
+ getTestStreamWriter(
+ String.format("projects/p1/datasets/d1/tables/t%s/streams/_default", i)));
+ }
+
+ for (long i = 0; i < appendCount; i++) {
+ // Insert requests into the tables in round-robin order.
+ futures.add(
+ sendFooStringTestMessage(
+ streamWriterList.get((int) (i % streamWriterList.size())),
+ connectionWorkerPool,
+ new String[] {String.valueOf(i)},
+ i));
+ }
+
+ for (int i = 0; i < appendCount; i++) {
+ AppendRowsResponse response = futures.get(i).get();
+ assertThat(response.getAppendResult().getOffset().getValue()).isEqualTo(i);
+ }
+ // At the end we should have scaled up to the expected number of connections.
+ assertThat(connectionWorkerPool.getCreateConnectionCount()).isEqualTo(expectedConnectionCount);
+
+ assertThat(testBigQueryWrite.getAppendRequests().size()).isEqualTo(appendCount);
+ // The order in which the server receives the requests is not guaranteed, so collect
+ // the offsets into a set and verify the set instead.
+ HashSet<Long> offsets = new HashSet<>();
+ for (int i = 0; i < appendCount; i++) {
+ AppendRowsRequest serverRequest = testBigQueryWrite.getAppendRequests().get(i);
+ assertThat(serverRequest.getProtoRows().getRows().getSerializedRowsCount()).isGreaterThan(0);
+ offsets.add(serverRequest.getOffset().getValue());
+ }
+ assertThat(offsets.size()).isEqualTo(appendCount);
+ }
+
+ @Test
+ void testMultiStreamClosed_multiplexingEnabled() throws Exception {
+ ConnectionWorkerPool.setOptions(
+ Settings.builder().setMaxConnectionsPerRegion(10).setMinConnectionsPerRegion(5).build());
+ ConnectionWorkerPool connectionWorkerPool =
+ createConnectionWorkerPool(
+ /* maxRequests= */ 3, /* maxBytes= */ 1000, java.time.Duration.ofSeconds(5));
+
+ // Sets the sleep time to simulate requests stuck in connection.
+ testBigQueryWrite.setResponseSleep(Duration.ofMillis(50L));
+ StreamWriter writeStream1 = getTestStreamWriter(TEST_STREAM_1);
+ StreamWriter writeStream2 = getTestStreamWriter(TEST_STREAM_2);
+
+ // Append 20 requests; at the end we should have 2 requests per connection.
+ long appendCount = 20;
+ for (long i = 0; i < appendCount; i++) {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+
+ // We will insert into the two tables in an interleaved fashion.
+ // The final status of each connection queue will be
+ // (s1 is the request coming from writeStream 1, etc):
+ // c1: [s1, s1], c2: [s2, s2], c3: [s1, s1], c4: [s2, s2]
+ // c5 - c10: [s1, s2]
+ for (int i = 0; i < appendCount; i++) {
+ StreamWriter writeStream = i % 2 == 0 ? writeStream1 : writeStream2;
+ futures.add(
+ sendFooStringTestMessage(
+ writeStream, connectionWorkerPool, new String[] {String.valueOf(i)}, i));
+ }
+
+ for (ApiFuture<AppendRowsResponse> future : futures) {
+ future.get();
+ }
+ // At the end we should scale up to 10 connections.
+ assertThat(connectionWorkerPool.getCreateConnectionCount()).isEqualTo(10);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(10);
+
+ // Now test calling close on each stream.
+ // When we close the first stream, only the connections that exclusively serve
+ // stream 1 are closed, which is why c1 and c3 go away.
+ connectionWorkerPool.close(writeStream1);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(8);
+
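+ // 10 - 2 = 8: per the queue layout above, c1 and c3 were the only connections
+ // carrying stream-1 traffic exclusively.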
+ // The next time we call close, every connection will be closed.
+ connectionWorkerPool.close(writeStream2);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(0);
+ }
+
+ @Test
+ void testMultiStreamAppend_appendWhileClosing() throws Exception {
+ ConnectionWorkerPool.setOptions(
+ Settings.builder().setMaxConnectionsPerRegion(10).setMinConnectionsPerRegion(5).build());
+ ConnectionWorkerPool connectionWorkerPool =
+ createConnectionWorkerPool(
+ /* maxRequests= */ 3, /* maxBytes= */ 100000, java.time.Duration.ofSeconds(5));
+
+ // Sets the sleep time to simulate requests stuck in connection.
+ testBigQueryWrite.setResponseSleep(Duration.ofMillis(50L));
+ StreamWriter writeStream1 = getTestStreamWriter(TEST_STREAM_1);
+ StreamWriter writeStream2 = getTestStreamWriter(TEST_STREAM_2);
+
+ // Append 10 requests; at the end we should have 2 requests per connection, and 5
+ // connections created.
+ long appendCount = 10;
+ for (long i = 0; i < appendCount; i++) {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+
+ // 1. We will insert into the two tables in an interleaved fashion.
+ // The final status of each connection queue will be
+ // (s1 is the request coming from writeStream 1, etc):
+ // c1: [s1, s1], c2: [s2, s2], c3: [s1, s1], c4: [s2, s2], c5: [s1, s2]
+ for (int i = 0; i < appendCount; i++) {
+ StreamWriter writeStream = i % 2 == 0 ? writeStream1 : writeStream2;
+ futures.add(
+ sendFooStringTestMessage(
+ writeStream, connectionWorkerPool, new String[] {String.valueOf(i)}, i));
+ }
+ assertThat(connectionWorkerPool.getCreateConnectionCount()).isEqualTo(5);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(5);
+
+ // 2. Close one of the streams. After this close we wait for the waiting queues on
+ // c1 and c3 to be drained; in the same period the other queues drain as well.
+ connectionWorkerPool.close(writeStream1);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(3);
+ // Sleep 1 second to make sure every message is drained.
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+
+ // 3. Insert another batch of messages. Since no connection has in-flight messages,
+ // we should be able to get back to the previous 5 connections.
+ for (int i = 0; i < appendCount; i++) {
+ StreamWriter writeStream = i % 2 == 0 ? writeStream1 : writeStream2;
+ futures.add(
+ sendFooStringTestMessage(
+ writeStream, connectionWorkerPool, new String[] {String.valueOf(i)}, i));
+ }
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(5);
+ for (ApiFuture<AppendRowsResponse> future : futures) {
+ future.get();
+ }
+
+ // 4. Close write stream 1. Two connections associated with it will be closed.
+ connectionWorkerPool.close(writeStream1);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(3);
+
+ // 5. Close write stream 2, all should be closed.
+ connectionWorkerPool.close(writeStream2);
+ assertThat(connectionWorkerPool.getTotalConnectionCount()).isEqualTo(0);
+ }
+
+ @Test
+ void testCloseWhileAppending_noDeadlockHappen() throws Exception {
+ ConnectionWorkerPool.setOptions(
+ Settings.builder().setMaxConnectionsPerRegion(10).setMinConnectionsPerRegion(5).build());
+ ConnectionWorkerPool connectionWorkerPool =
+ createConnectionWorkerPool(
+ /* maxRequests= */ 1500, /* maxBytes= */ 100000, java.time.Duration.ofSeconds(5));
+
+ // Sets the sleep time to simulate requests stuck in connection.
+ testBigQueryWrite.setResponseSleep(Duration.ofMillis(20L)); + StreamWriter writeStream1 = getTestStreamWriter(TEST_STREAM_1); + + ListeningExecutorService threadPool = + MoreExecutors.listeningDecorator( + Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("AsyncStreamReadThread") + .build())); + + long appendCount = 10; + for (long i = 0; i < appendCount; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + List> futures = new ArrayList<>(); + + for (int i = 0; i < 500; i++) { + futures.add( + threadPool.submit( + () -> { + sendFooStringTestMessage( + writeStream1, connectionWorkerPool, new String[] {String.valueOf(0)}, 0); + })); + } + connectionWorkerPool.close(writeStream1); + for (int i = 0; i < 500; i++) { + futures.get(i).get(); + } + } + + private class DummySupplierWillFailNTimesThenSucceed + implements Supplier { + private int failCount; + private final Status.Code errorCode; + private final String errorMessage; + private final int successOffset; + + DummySupplierWillFailNTimesThenSucceed( + int failCount, Status.Code errorCode, String errorMessage, int successOffset) { + this.failCount = failCount; + this.errorCode = errorCode; + this.errorMessage = errorMessage; + this.successOffset = successOffset; + } + + @Override + public FakeBigQueryWriteImpl.Response get() { + if (failCount <= 0) { + return new FakeBigQueryWriteImpl.Response(createAppendResponse(successOffset)); + } + failCount--; + return new FakeBigQueryWriteImpl.Response( + createAppendResponseWithError(errorCode, errorMessage)); + } + } + + @Test + void testAppendWithRetry() throws Exception { + ConnectionWorkerPool connectionWorkerPool = + createConnectionWorkerPool( + /* maxRequests= */ 1500, /* maxBytes= */ 100000, java.time.Duration.ofSeconds(5)); + + StreamWriter writeStream1 = getTestStreamWriter(TEST_STREAM_1); + + // Simulate the maximum allowable failures, followed by success. + testBigQueryWrite.addResponse( + new DummySupplierWillFailNTimesThenSucceed( + MAX_RETRY_NUM_ATTEMPTS, Status.RESOURCE_EXHAUSTED.getCode(), "test quota error A", 0)); + testBigQueryWrite.addResponse( + new DummySupplierWillFailNTimesThenSucceed( + MAX_RETRY_NUM_ATTEMPTS - 1, + Status.RESOURCE_EXHAUSTED.getCode(), + "test quota error B", + 1)); + testBigQueryWrite.addResponse(createAppendResponse(2)); + + List> futures = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + futures.add( + sendFooStringTestMessage( + writeStream1, connectionWorkerPool, new String[] {String.valueOf(i)}, i)); + } + for (int i = 0; i < 3; i++) { + futures.get(i).get(); + } + connectionWorkerPool.close(writeStream1); + } + + @Test + void testToTableName() { + assertThat(ConnectionWorkerPool.toTableName("projects/p/datasets/d/tables/t/streams/s")) + .isEqualTo("projects/p/datasets/d/tables/t"); + + IllegalArgumentException ex = + assertThrows( + IllegalArgumentException.class, () -> ConnectionWorkerPool.toTableName("projects/p/")); + } + + @Test + void testCloseExternalClient() throws IOException, InterruptedException, ExecutionException { + StreamWriter.clearConnectionPool(); + // Try append 100 requests. 
+ long appendCount = 100L;
+ // Queue enough responses on testBigQueryWrite for both batches of appends below.
+ for (long i = 0; i < appendCount * 2; i++) {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+ testBigQueryWrite.addResponse(WriteStream.newBuilder().setLocation("us").build());
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+ BigQueryWriteClient externalClient =
+ BigQueryWriteClient.create(
+ BigQueryWriteSettings.newBuilder()
+ .setCredentialsProvider(NoCredentialsProvider.create())
+ .setTransportChannelProvider(serviceHelper.createChannelProvider())
+ .build());
+ // Create some stream writers.
+ List<StreamWriter> streamWriterList = new ArrayList<>();
+ for (int i = 0; i < 4; i++) {
+ streamWriterList.add(
+ StreamWriter.newBuilder(
+ String.format("projects/p1/datasets/d1/tables/t%s/streams/_default", i),
+ externalClient)
+ .setEnableConnectionPool(true)
+ .setWriterSchema(createProtoSchema())
+ .setTraceId(TEST_TRACE_ID)
+ .setLocation("us")
+ .setRetrySettings(retrySettings)
+ .build());
+ }
+
+ for (long i = 0; i < appendCount; i++) {
+ StreamWriter sw = streamWriterList.get((int) (i % streamWriterList.size()));
+ // Insert requests into the different tables in round-robin order.
+ futures.add(sw.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+ }
+ externalClient.close();
+ externalClient.awaitTermination(1, TimeUnit.MINUTES);
+ // Send more requests; the connections should still work.
+ for (long i = appendCount; i < appendCount * 2; i++) {
+ StreamWriter sw = streamWriterList.get((int) (i % streamWriterList.size()));
+ futures.add(sw.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+ }
+ for (int i = 0; i < appendCount * 2; i++) {
+ AppendRowsResponse response = futures.get(i).get();
+ assertThat(response.getAppendResult().getOffset().getValue()).isEqualTo(i);
+ }
+ assertThat(testBigQueryWrite.getAppendRequests().size()).isEqualTo(appendCount * 2);
+ for (int i = 0; i < streamWriterList.size(); i++) {
+ streamWriterList.get(i).close();
+ }
+ StreamWriter.clearConnectionPool();
+ }
+
+ private AppendRowsResponse createAppendResponse(long offset) {
+ return AppendRowsResponse.newBuilder()
+ .setAppendResult(
+ AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(offset)).build())
+ .build();
+ }
+
+ private AppendRowsResponse createAppendResponseWithError(Status.Code code, String message) {
+ return AppendRowsResponse.newBuilder()
+ .setError(com.google.rpc.Status.newBuilder().setCode(code.value()).setMessage(message))
+ .build();
+ }
+
+ private StreamWriter getTestStreamWriter(String streamName) throws IOException {
+ return StreamWriter.newBuilder(streamName)
+ .setWriterSchema(createProtoSchema())
+ .setTraceId(TEST_TRACE_ID)
+ .setLocation("us")
+ .setCredentialsProvider(NoCredentialsProvider.create())
+ .setChannelProvider(serviceHelper.createChannelProvider())
+ .build();
+ }
+
+ private ProtoSchema createProtoSchema() {
+ return ProtoSchema.newBuilder()
+ .setProtoDescriptor(
+ DescriptorProtos.DescriptorProto.newBuilder()
+ .setName("Message")
+ .addField(
+ DescriptorProtos.FieldDescriptorProto.newBuilder()
+ .setName("foo")
+ .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING)
+ .setNumber(1)
+ .build())
+ .build())
+ .build();
+ }
+
+ private ApiFuture<AppendRowsResponse> sendFooStringTestMessage(
+ StreamWriter writeStream,
+ ConnectionWorkerPool connectionWorkerPool,
+ String[] messages,
+ long offset) {
+ return connectionWorkerPool.append(
+ writeStream,
+ AppendFormats.AppendRowsData.of(createProtoRows(messages)),
+ offset,
+ /* requestUniqueId= */ "request_" + offset);
+ }
+
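+ // Serializes each message string into a FooType proto row. ProtoRows carries the
+ // rows as opaque bytes; the writer schema supplied alongside is what lets the
+ // server interpret them.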
private ProtoRows createProtoRows(String[] messages) { + ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); + for (String message : messages) { + FooType foo = FooType.newBuilder().setFoo(message).build(); + rowsBuilder.addSerializedRows(foo.toByteString()); + } + return rowsBuilder.build(); + } + + ConnectionWorkerPool createConnectionWorkerPool( + long maxRequests, long maxBytes, java.time.Duration maxRetryDuration) { + ConnectionWorkerPool.enableTestingLogic(); + return new ConnectionWorkerPool( + maxRequests, + maxBytes, + maxRetryDuration, + FlowController.LimitExceededBehavior.Block, + null, + clientSettings, + retrySettings, + /* enableRequestProfiler= */ false, + /*enableOpenTelemetry*/ false); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java new file mode 100644 index 000000000000..59feb63117b9 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java @@ -0,0 +1,1158 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.batching.FlowController; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.storage.test.Test.ComplicateType; +import com.google.cloud.bigquery.storage.test.Test.FooType; +import com.google.cloud.bigquery.storage.test.Test.InnerType; +import com.google.cloud.bigquery.storage.v1.ConnectionWorker.Load; +import com.google.protobuf.ByteString; +import com.google.protobuf.DescriptorProtos; +import com.google.protobuf.Int64Value; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.opentelemetry.api.common.Attributes; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.VarCharVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.VectorUnloader; +import org.apache.arrow.vector.ipc.ReadChannel; +import org.apache.arrow.vector.ipc.WriteChannel; +import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; +import org.apache.arrow.vector.ipc.message.MessageSerializer; +import org.apache.arrow.vector.types.pojo.ArrowType; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.arrow.vector.types.pojo.Schema; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class ConnectionWorkerTest { + private static final Logger log = Logger.getLogger(StreamWriter.class.getName()); + private static final String TEST_STREAM_1 = "projects/p1/datasets/d1/tables/t1/streams/s1"; + private static final String TEST_STREAM_2 = "projects/p2/datasets/d2/tables/t2/streams/s2"; + private static final String TEST_TRACE_ID = "DATAFLOW:job_id"; + private static final RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(java.time.Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(3) + .setMaxRetryDelayDuration(java.time.Duration.ofMinutes(5)) + .build(); + + private FakeBigQueryWrite testBigQueryWrite; + private FakeScheduledExecutorService fakeExecutor; + private static MockServiceHelper serviceHelper; + private BigQueryWriteClient client; + + @BeforeEach + void setUp() throws Exception { + testBigQueryWrite = new FakeBigQueryWrite(); + ConnectionWorker.setMaxInflightQueueWaitTime(300000); + ConnectionWorker.setMaxInflightRequestWaitTime(Duration.ofMinutes(10)); + serviceHelper = + new MockServiceHelper( + 
+            UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite));
+    serviceHelper.start();
+    fakeExecutor = new FakeScheduledExecutorService();
+    testBigQueryWrite.setExecutor(fakeExecutor);
+    client =
+        BigQueryWriteClient.create(
+            BigQueryWriteSettings.newBuilder()
+                .setCredentialsProvider(NoCredentialsProvider.create())
+                .setTransportChannelProvider(serviceHelper.createChannelProvider())
+                .build());
+  }
+
+  @AfterEach
+  void cleanUp() throws InterruptedException {
+    serviceHelper.stop();
+
+    client.close();
+    client.awaitTermination(10, TimeUnit.SECONDS);
+  }
+
+  @Test
+  void testMultiplexedAppendSuccess_NonNullTraceId() throws Exception {
+    testMultiplexedIngestion(
+        /* sw1TraceId= */ "header_1:trailer_1",
+        /* sw2TraceId= */ "header_2:trailer_2",
+        /* expectedSW1TraceId= */ "java-streamwriter header_1:trailer_1",
+        /* expectedSW2TraceId= */ "java-streamwriter header_2:trailer_2");
+  }
+
+  @Test
+  void testMultiplexedAppendSuccess_EmptyTraceId() throws Exception {
+    testMultiplexedIngestion(
+        /* sw1TraceId= */ "header_1:trailer_1",
+        /* sw2TraceId= */ "",
+        /* expectedSW1TraceId= */ "java-streamwriter header_1:trailer_1",
+        /* expectedSW2TraceId= */ "java-streamwriter");
+  }
+
+  private void testMultiplexedIngestion(
+      String sw1TraceId, String sw2TraceId, String expectedSW1TraceId, String expectedSW2TraceId)
+      throws Exception {
+    try (ConnectionWorker connectionWorker = createMultiplexedConnectionWorker()) {
+      long appendCount = 20;
+      for (long i = 0; i < appendCount; i++) {
+        testBigQueryWrite.addResponse(createAppendResponse(i));
+      }
+      List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+      // A trace id is only set on a builder when it is non-empty.
+      StreamWriter.Builder sw1Builder =
+          StreamWriter.newBuilder(TEST_STREAM_1, client)
+              .setWriterSchema(createProtoSchema("foo"))
+              .setLocation("us");
+      StreamWriter.Builder sw2Builder =
+          StreamWriter.newBuilder(TEST_STREAM_2, client)
+              .setWriterSchema(createProtoSchema("complicate"))
+              .setLocation("us");
+      if (!sw1TraceId.isEmpty()) {
+        sw1Builder.setTraceId(sw1TraceId);
+      }
+      if (!sw2TraceId.isEmpty()) {
+        sw2Builder.setTraceId(sw2TraceId);
+      }
+      StreamWriter sw1 = sw1Builder.build();
+      StreamWriter sw2 = sw2Builder.build();
+
+      // We do a pattern of:
+      //   send to stream1, string1
+      //   send to stream1, string2
+      //   send to stream2, string3
+      //   send to stream2, string4
+      //   send to stream1, string5
+      //   ...
+      for (long i = 0; i < appendCount; i++) {
+        switch ((int) i % 4) {
+          case 0:
+          case 1:
+            futures.add(
+                sendTestMessage(
+                    connectionWorker,
+                    sw1,
+                    createFooProtoRows(new String[] {String.valueOf(i)}),
+                    i));
+            break;
+          case 2:
+          case 3:
+            futures.add(
+                sendTestMessage(
+                    connectionWorker,
+                    sw2,
+                    createComplicateTypeProtoRows(new String[] {String.valueOf(i)}),
+                    i));
+            break;
+          default: // fall out
+            break;
+        }
+      }
+      // In the real world the response won't contain an offset for the default stream, but we
+      // use the offset here just to test the response.
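+      // For illustration: future i below is expected to resolve to the queued response carrying
+      // offset i, e.g. futures.get(3).get().getAppendResult().getOffset() equals
+      // Int64Value.of(3).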
+      for (int i = 0; i < appendCount; i++) {
+        Int64Value offset = futures.get(i).get().getAppendResult().getOffset();
+        assertThat(offset).isEqualTo(Int64Value.of(i));
+      }
+      assertThat(testBigQueryWrite.getAppendRequests().size()).isEqualTo(appendCount);
+      for (int i = 0; i < appendCount; i++) {
+        AppendRowsRequest serverRequest = testBigQueryWrite.getAppendRequests().get(i);
+        assertThat(serverRequest.getProtoRows().getRows().getSerializedRowsCount())
+            .isGreaterThan(0);
+        assertThat(serverRequest.getOffset().getValue()).isEqualTo(i);
+
+        // We will get the requests in the pattern of:
+        //   (writer_stream: t1, schema: t1)
+        //   (writer_stream: t1, schema: _)
+        //   (writer_stream: t2, schema: t2) -> multiplexing entered.
+        //   (writer_stream: t2, schema: _)
+        //   (writer_stream: t1, schema: t1)
+        //   (writer_stream: t1, schema: _)
+        switch (i % 4) {
+          case 0:
+            assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1);
+            assertThat(
+                    serverRequest.getProtoRows().getWriterSchema().getProtoDescriptor().getName())
+                .isEqualTo("foo");
+            assertThat(serverRequest.getTraceId()).isEqualTo(expectedSW1TraceId);
+            break;
+          case 1:
+            // The stream name stays populated on subsequent requests.
+            assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1);
+            // Schema is empty if not at the first request after table switch.
+            assertThat(serverRequest.getProtoRows().hasWriterSchema()).isFalse();
+            assertThat(serverRequest.getTraceId()).isEmpty();
+            break;
+          case 2:
+            // Stream name is always populated after multiplexing.
+            assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_2);
+            // Schema is populated after table switch.
+            assertThat(
+                    serverRequest.getProtoRows().getWriterSchema().getProtoDescriptor().getName())
+                .isEqualTo("complicate");
+            assertThat(serverRequest.getTraceId()).isEqualTo(expectedSW2TraceId);
+            break;
+          case 3:
+            // Schema is empty if not at the first request after table switch.
+            assertThat(serverRequest.getProtoRows().hasWriterSchema()).isFalse();
+            // Stream name is always populated after multiplexing.
+            assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_2);
+            assertThat(serverRequest.getTraceId()).isEmpty();
+            break;
+          default: // fall out
+            break;
+        }
+      }
+
+      assertThat(connectionWorker.getLoad().destinationCount()).isEqualTo(2);
+      assertThat(connectionWorker.getLoad().inFlightRequestsBytes()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  void testMultiplexedAppendSuccess_MixEmptyAndNonEmptyTraceId() throws Exception {
+    testMultiplexedIngestion(
+        /* sw1TraceId= */ "",
+        /* sw2TraceId= */ "header_2:trailer_2",
+        /* expectedSW1TraceId= */ "java-streamwriter",
+        /* expectedSW2TraceId= */ "java-streamwriter header_2:trailer_2");
+  }
+
+  @Test
+  void testAppendInSameStream_switchSchema() throws Exception {
+    try (ConnectionWorker connectionWorker = createMultiplexedConnectionWorker()) {
+      long appendCount = 20;
+      for (long i = 0; i < appendCount; i++) {
+        testBigQueryWrite.addResponse(createAppendResponse(i));
+      }
+      List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+
+      // Schema1 and schema2 have the same content, but are different instances.
+      ProtoSchema schema1 = createProtoSchema("foo");
+      ProtoSchema schema2 = createProtoSchema("foo");
+      // Schema3 is a different schema.
+      ProtoSchema schema3 = createProtoSchema("bar");
+
+      // We do a pattern of:
+      //   send to stream1, schema1
+      //   send to stream1, schema2
+      //   send to stream1, schema3
+      //   send to stream1, schema3
+      //   send to stream1, schema1
+      //   ...
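+      // Because schema1 and schema2 carry identical descriptors, alternating between sw1 and
+      // sw2 should not resend the writer schema; only the switch to schema3 ("bar") should, as
+      // the per-request assertions below verify.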
+ StreamWriter sw1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setLocation("us") + .setWriterSchema(schema1) + .build(); + StreamWriter sw2 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setLocation("us") + .setWriterSchema(schema2) + .build(); + StreamWriter sw3 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setLocation("us") + .setWriterSchema(schema3) + .build(); + for (long i = 0; i < appendCount; i++) { + switch ((int) i % 4) { + case 0: + futures.add( + sendTestMessage( + connectionWorker, + sw1, + createFooProtoRows(new String[] {String.valueOf(i)}), + i)); + break; + case 1: + futures.add( + sendTestMessage( + connectionWorker, + sw2, + createFooProtoRows(new String[] {String.valueOf(i)}), + i)); + break; + case 2: + case 3: + futures.add( + sendTestMessage( + connectionWorker, + sw3, + createFooProtoRows(new String[] {String.valueOf(i)}), + i)); + break; + default: // fall out + break; + } + } + // In the real world the response won't contain offset for default stream, but we use offset + // here just to test response. + for (int i = 0; i < appendCount; i++) { + Int64Value offset = futures.get(i).get().getAppendResult().getOffset(); + assertThat(offset).isEqualTo(Int64Value.of(i)); + } + assertThat(testBigQueryWrite.getAppendRequests().size()).isEqualTo(appendCount); + for (int i = 0; i < appendCount; i++) { + AppendRowsRequest serverRequest = testBigQueryWrite.getAppendRequests().get(i); + assertThat(serverRequest.getProtoRows().getRows().getSerializedRowsCount()) + .isGreaterThan(0); + assertThat(serverRequest.getOffset().getValue()).isEqualTo(i); + + // We will get the request as the pattern of: + // (writer_stream: t1, schema: schema1) + // (writer_stream: t1, schema: _) + // (writer_stream: t1, schema: schema3) + // (writer_stream: t1, schema: _) + // (writer_stream: t1, schema: schema1) + // (writer_stream: t1, schema: _) + switch (i % 4) { + case 0: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + assertThat( + serverRequest.getProtoRows().getWriterSchema().getProtoDescriptor().getName()) + .isEqualTo("foo"); + break; + case 1: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + // Schema is empty if not at the first request after table switch. + assertThat(serverRequest.getProtoRows().hasWriterSchema()).isFalse(); + break; + case 2: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + // Schema is populated after table switch. + assertThat( + serverRequest.getProtoRows().getWriterSchema().getProtoDescriptor().getName()) + .isEqualTo("bar"); + break; + case 3: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + // Schema is empty if not at the first request after table switch. + assertThat(serverRequest.getProtoRows().hasWriterSchema()).isFalse(); + break; + default: // fall out + break; + } + } + + assertThat(connectionWorker.getLoad().destinationCount()).isEqualTo(1); + assertThat(connectionWorker.getLoad().inFlightRequestsBytes()).isEqualTo(0); + } + } + + @Test + void testAppendInSameStreamSwitchArrowSchema() throws Exception { + try (ConnectionWorker connectionWorker = createMultiplexedConnectionWorker()) { + long appendCount = 60; + for (long i = 0; i < appendCount; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + List> futures = new ArrayList<>(); + + // Schema1 and schema2 are the same content, but different instance. 
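+      // Presumably the same dedupe applies here: schema1 and schema2 serialize to the same
+      // bytes, so only the switch to schema3 ("col2") is expected to resend a schema (see the
+      // assertions below).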
+ ArrowSchema schema1 = createArrowSchema("col1"); + ArrowSchema schema2 = createArrowSchema("col1"); + // Schema3 is a different schema + ArrowSchema schema3 = createArrowSchema("col2"); + + // We do a pattern of: + // send to stream1, schema1 + // send to stream1, schema2 + // send to stream1, schema3 + // send to stream1, schema3 + // send to stream1, schema1 + // ... + StreamWriter sw1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setLocation("us") + .setWriterSchema(schema1) + .build(); + StreamWriter sw2 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setLocation("us") + .setWriterSchema(schema2) + .build(); + StreamWriter sw3 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setLocation("us") + .setWriterSchema(schema3) + .build(); + for (long i = 0; i < appendCount; i++) { + switch ((int) i % 4) { + case 0: + futures.add( + sendTestMessage( + connectionWorker, + sw1, + createArrowRecordBatch(schema1, new String[] {String.valueOf(i)}), + i)); + break; + case 1: + futures.add( + sendTestMessage( + connectionWorker, + sw2, + createArrowRecordBatch(schema2, new String[] {String.valueOf(i)}), + i)); + break; + case 2: + case 3: + futures.add( + sendTestMessage( + connectionWorker, + sw3, + createArrowRecordBatch(schema3, new String[] {String.valueOf(i)}), + i)); + break; + default: // fall out + break; + } + } + // In the real world the response won't contain offset for default stream, but we use offset + // here just to test response. + for (int i = 0; i < appendCount; i++) { + Int64Value offset = futures.get(i).get().getAppendResult().getOffset(); + assertThat(offset).isEqualTo(Int64Value.of(i)); + } + assertThat(testBigQueryWrite.getAppendRequests().size()).isEqualTo(appendCount); + for (int i = 0; i < appendCount; i++) { + AppendRowsRequest serverRequest = testBigQueryWrite.getAppendRequests().get(i); + assertThat(serverRequest.getArrowRows().getRows().getSerializedRecordBatch().size()) + .isGreaterThan(0); + assertThat(serverRequest.getOffset().getValue()).isEqualTo(i); + + // Since schema 1 equals schema 2, we will get the request as the pattern of: + // (writer_stream: TEST_STREAM_1, schema: schema1) + // (writer_stream: TEST_STREAM_1, schema: _) + // (writer_stream: TEST_STREAM_1, schema: schema3) + // (writer_stream: TEST_STREAM_1, schema: _) + // (writer_stream: TEST_STREAM_1, schema: schema1) + // (writer_stream: TEST_STREAM_1, schema: _) + switch (i % 4) { + case 0: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + assertThat(serverRequest.getArrowRows().getWriterSchema()).isEqualTo(schema1); + break; + case 1: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + // Schema is empty if not at the first request after table switch. + assertThat(serverRequest.getArrowRows().hasWriterSchema()).isFalse(); + break; + case 2: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + // Schema is populated after table switch. + assertThat(serverRequest.getArrowRows().getWriterSchema()).isEqualTo(schema3); + break; + case 3: + assertThat(serverRequest.getWriteStream()).isEqualTo(TEST_STREAM_1); + // Schema is empty if not at the first request after table switch. 
+            assertThat(serverRequest.getArrowRows().hasWriterSchema()).isFalse();
+            break;
+          default: // fall out
+            break;
+        }
+      }
+
+      assertThat(connectionWorker.getLoad().destinationCount()).isEqualTo(1);
+      assertThat(connectionWorker.getLoad().inFlightRequestsBytes()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  void testAppendButInflightQueueFull() throws Exception {
+    ProtoSchema schema1 = createProtoSchema("foo");
+    StreamWriter sw1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setLocation("us")
+            .setWriterSchema(schema1)
+            .build();
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            TEST_STREAM_1,
+            "us",
+            createProtoSchema("foo"),
+            6,
+            100000,
+            Duration.ofSeconds(100),
+            FlowController.LimitExceededBehavior.Block,
+            TEST_TRACE_ID,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ false,
+            /* isMultiplexing= */ false);
+    testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1));
+    ConnectionWorker.setMaxInflightQueueWaitTime(500);
+
+    long appendCount = 6;
+    for (int i = 0; i < appendCount; i++) {
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+
+    // In total insert 6 requests; since the max queue size is 5, we will get stuck at the 6th
+    // request.
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    for (int i = 0; i < appendCount; i++) {
+      long startTime = System.currentTimeMillis();
+      // At the last request we wait more than 500 milliseconds for inflight quota.
+      if (i == 5) {
+        assertThrows(
+            StatusRuntimeException.class,
+            () -> {
+              sendTestMessage(
+                  connectionWorker, sw1, createFooProtoRows(new String[] {String.valueOf(5)}), 5);
+            });
+        long timeDiff = System.currentTimeMillis() - startTime;
+        assertEquals(connectionWorker.getLoad().inFlightRequestsCount(), 5);
+        assertTrue(timeDiff > 500);
+      } else {
+        futures.add(
+            sendTestMessage(
+                connectionWorker, sw1, createFooProtoRows(new String[] {String.valueOf(i)}), i));
+        assertEquals(connectionWorker.getLoad().inFlightRequestsCount(), i + 1);
+      }
+    }
+
+    for (int i = 0; i < appendCount - 1; i++) {
+      assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue());
+    }
+  }
+
+  @Test
+  void testThrowExceptionWhileWithinAppendLoop() throws Exception {
+    ProtoSchema schema1 = createProtoSchema("foo");
+    StreamWriter sw1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setLocation("us")
+            .setWriterSchema(schema1)
+            .build();
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            TEST_STREAM_1,
+            "us",
+            createProtoSchema("foo"),
+            100000,
+            100000,
+            Duration.ofSeconds(100),
+            FlowController.LimitExceededBehavior.Block,
+            TEST_TRACE_ID,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ false,
+            /* isMultiplexing= */ true);
+    testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1));
+    ConnectionWorker.setMaxInflightQueueWaitTime(500);
+
+    long appendCount = 10;
+    for (int i = 0; i < appendCount; i++) {
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+    connectionWorker.setTestOnlyRunTimeExceptionInAppendLoop(
+        new RuntimeException("Any exception can happen."));
+    // Sleep 1 second before erroring out.
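+    // The sleep gives all 10 appends time to enter the inflight queue before the injected
+    // exception aborts the connection, so the first future fails with the injected error and
+    // the rest fail with the aborted-connection error asserted below.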
+    connectionWorker.setTestOnlyAppendLoopSleepTime(1000L);
+
+    // In total insert 10 requests.
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    for (int i = 0; i < appendCount; i++) {
+      futures.add(
+          sendTestMessage(
+              connectionWorker, sw1, createFooProtoRows(new String[] {String.valueOf(i)}), i));
+      assertEquals(connectionWorker.getLoad().inFlightRequestsCount(), i + 1);
+    }
+
+    for (int i = 0; i < appendCount; i++) {
+      int finalI = i;
+      ExecutionException ex =
+          assertThrows(
+              ExecutionException.class,
+              () -> futures.get(finalI).get().getAppendResult().getOffset().getValue());
+      if (i == 0) {
+        assertThat(ex.getCause()).hasMessageThat().contains("Any exception can happen.");
+      } else {
+        assertThat(ex.getCause()).hasMessageThat().contains("Connection is aborted due to ");
+      }
+    }
+
+    // Any subsequent append will fail immediately.
+    ExecutionException ex =
+        assertThrows(
+            ExecutionException.class,
+            () ->
+                sendTestMessage(
+                        connectionWorker,
+                        sw1,
+                        createFooProtoRows(new String[] {String.valueOf(100)}),
+                        100)
+                    .get());
+    assertThat(ex.getCause()).hasMessageThat().contains("Any exception can happen.");
+  }
+
+  @Test
+  void testLocationMismatch() throws Exception {
+    ProtoSchema schema1 = createProtoSchema("foo");
+    StreamWriter sw1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setWriterSchema(schema1)
+            .setLocation("eu")
+            .build();
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            TEST_STREAM_1,
+            "us",
+            createProtoSchema("foo"),
+            100000,
+            100000,
+            Duration.ofSeconds(100),
+            FlowController.LimitExceededBehavior.Block,
+            TEST_TRACE_ID,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ false,
+            /* isMultiplexing= */ true);
+    StatusRuntimeException ex =
+        assertThrows(
+            StatusRuntimeException.class,
+            () ->
+                sendTestMessage(
+                    connectionWorker,
+                    sw1,
+                    createFooProtoRows(new String[] {String.valueOf(0)}),
+                    0));
+    assertEquals(
+        "INVALID_ARGUMENT: StreamWriter with location eu is scheduled to use a connection with"
+            + " location us",
+        ex.getMessage());
+  }
+
+  @Test
+  void testStreamNameMismatch() throws Exception {
+    ProtoSchema schema1 = createProtoSchema("foo");
+    StreamWriter sw1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(schema1).build();
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            TEST_STREAM_2,
+            null,
+            createProtoSchema("foo"),
+            100000,
+            100000,
+            Duration.ofSeconds(100),
+            FlowController.LimitExceededBehavior.Block,
+            TEST_TRACE_ID,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ false,
+            /* isMultiplexing= */ true);
+    StatusRuntimeException ex =
+        assertThrows(
+            StatusRuntimeException.class,
+            () ->
+                sendTestMessage(
+                    connectionWorker,
+                    sw1,
+                    createFooProtoRows(new String[] {String.valueOf(0)}),
+                    0));
+    assertEquals(
+        "INVALID_ARGUMENT: StreamWriter with stream name"
+            + " projects/p1/datasets/d1/tables/t1/streams/s1 is scheduled to use a connection with"
+            + " stream name projects/p2/datasets/d2/tables/t2/streams/s2",
+        ex.getMessage());
+  }
+
+  @Test
+  void testExponentialBackoff() throws Exception {
+    assertThat(ConnectionWorker.calculateSleepTimeMilli(0)).isEqualTo(50);
+    assertThat(ConnectionWorker.calculateSleepTimeMilli(5)).isEqualTo(1600);
+    assertThat(ConnectionWorker.calculateSleepTimeMilli(100)).isEqualTo(60000);
+  }
+
+  private AppendRowsResponse createAppendResponse(long offset) {
+    return AppendRowsResponse.newBuilder()
+        .setAppendResult(
+            AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(offset)).build())
+        .build();
+  }
+
+  private ConnectionWorker createMultiplexedConnectionWorker() throws IOException {
+    // By default use only the first table as the table reference.
+    return createMultiplexedConnectionWorker(
+        TEST_STREAM_1, TEST_TRACE_ID, 100, 1000, java.time.Duration.ofSeconds(5));
+  }
+
+  private ConnectionWorker createMultiplexedConnectionWorker(
+      String streamName,
+      String traceId,
+      long maxRequests,
+      long maxBytes,
+      java.time.Duration maxRetryDuration)
+      throws IOException {
+    return new ConnectionWorker(
+        streamName,
+        "us",
+        createProtoSchema("foo"),
+        maxRequests,
+        maxBytes,
+        maxRetryDuration,
+        FlowController.LimitExceededBehavior.Block,
+        traceId,
+        null,
+        client.getSettings(),
+        retrySettings,
+        /* enableRequestProfiler= */ false,
+        /* enableOpenTelemetry= */ false,
+        /* isMultiplexing= */ true);
+  }
+
+  private ProtoSchema createProtoSchema(String protoName) {
+    return ProtoSchema.newBuilder()
+        .setProtoDescriptor(
+            DescriptorProtos.DescriptorProto.newBuilder()
+                .setName(protoName)
+                .addField(
+                    DescriptorProtos.FieldDescriptorProto.newBuilder()
+                        .setName("foo")
+                        .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING)
+                        .setNumber(1)
+                        .build())
+                .build())
+        .build();
+  }
+
+  private ApiFuture<AppendRowsResponse> sendTestMessage(
+      ConnectionWorker connectionWorker,
+      StreamWriter streamWriter,
+      ProtoRows protoRows,
+      long offset) {
+    return connectionWorker.append(
+        streamWriter,
+        AppendFormats.AppendRowsData.of(protoRows),
+        offset,
+        /* requestUniqueId= */ "request_" + offset);
+  }
+
+  private ApiFuture<AppendRowsResponse> sendTestMessage(
+      ConnectionWorker connectionWorker,
+      StreamWriter streamWriter,
+      com.google.cloud.bigquery.storage.v1.ArrowRecordBatch arrowRecordBatch,
+      long offset) {
+    return connectionWorker.append(
+        streamWriter,
+        AppendFormats.AppendRowsData.of(arrowRecordBatch),
+        offset,
+        /* requestUniqueId= */ "request_" + offset);
+  }
+
+  private ProtoRows createFooProtoRows(String[] messages) {
+    ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder();
+    for (String message : messages) {
+      FooType foo = FooType.newBuilder().setFoo(message).build();
+      rowsBuilder.addSerializedRows(foo.toByteString());
+    }
+    return rowsBuilder.build();
+  }
+
+  private ProtoRows createComplicateTypeProtoRows(String[] messages) {
+    ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder();
+    for (String message : messages) {
+      ComplicateType complicateType =
+          ComplicateType.newBuilder()
+              .setInnerType(InnerType.newBuilder().addValue(message))
+              .build();
+      rowsBuilder.addSerializedRows(complicateType.toByteString());
+    }
+    return rowsBuilder.build();
+  }
+
+  private ArrowSchema createArrowSchema(String fieldName) {
+    Field col = new Field(fieldName, FieldType.nullable(new ArrowType.Utf8()), null);
+    Schema arrowSchema = new Schema(Arrays.asList(col));
+    final ByteArrayOutputStream out = new ByteArrayOutputStream();
+    try {
+      MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), arrowSchema);
+    } catch (IOException e) {
+      throw new IllegalStateException("Failed to serialize arrow schema.", e);
+    }
+    byte[] bytes = out.toByteArray();
+    return ArrowSchema.newBuilder().setSerializedSchema(ByteString.copyFrom(bytes)).build();
+  }
+
+  private com.google.cloud.bigquery.storage.v1.ArrowRecordBatch createArrowRecordBatch(
+      ArrowSchema arrowSchema, String[] messages) {
+    try {
+      Schema schema =
+          MessageSerializer.deserializeSchema(
+              new ReadChannel(
+                  Channels.newChannel(
+                      new ByteArrayInputStream(
+                          arrowSchema.getSerializedSchema().toByteArray()))));
+      try (VectorSchemaRoot vectorSchemaRoot =
+          VectorSchemaRoot.create(schema, new RootAllocator())) {
+        VarCharVector varCharVector = (VarCharVector) vectorSchemaRoot.getVector(0);
+        varCharVector.allocateNew(messages.length);
+        for (int i = 0; i < messages.length; i++) {
+          varCharVector.set(i, messages[i].getBytes(UTF_8));
+        }
+        vectorSchemaRoot.setRowCount(messages.length);
+
+        VectorUnloader vectorUnloader = new VectorUnloader(vectorSchemaRoot);
+        try (final ArrowRecordBatch recordBatch = vectorUnloader.getRecordBatch()) {
+          final ByteArrayOutputStream out = new ByteArrayOutputStream();
+          MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), recordBatch);
+          ByteString serialized = ByteString.copyFrom(out.toByteArray());
+          return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.newBuilder()
+              .setSerializedRecordBatch(serialized)
+              .build();
+        }
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Test
+  void testLoadCompare_compareLoad() {
+    // In-flight bytes are compared in buckets of 1024.
+    // When the in-flight bytes fall in a lower bucket, the load is still smaller even if the
+    // destination count and request count are higher.
+    Load load1 = ConnectionWorker.Load.create(1000, 2000, 100, 1000, 10);
+    Load load2 = ConnectionWorker.Load.create(2000, 1000, 10, 1000, 10);
+    assertThat(Load.LOAD_COMPARATOR.compare(load1, load2)).isLessThan(0);
+
+    // In-flight bytes in the same bucket fall back to comparing the request count.
+    Load load3 = ConnectionWorker.Load.create(1, 300, 10, 0, 10);
+    Load load4 = ConnectionWorker.Load.create(10, 1, 10, 0, 10);
+    assertThat(Load.LOAD_COMPARATOR.compare(load3, load4)).isGreaterThan(0);
+
+    // In-flight requests and bytes in the same buckets fall back to comparing the destination
+    // count.
+    Load load5 = ConnectionWorker.Load.create(200, 1, 10, 1000, 10);
+    Load load6 = ConnectionWorker.Load.create(100, 10, 10, 1000, 10);
+    assertThat(Load.LOAD_COMPARATOR.compare(load5, load6) == 0).isTrue();
+  }
+
+  @Test
+  void testLoadIsOverWhelmed() {
+    // Only in-flight requests are considered in the current overwhelmed calculation.
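+    // A hedged reading of the cases below: load1's in-flight request count is near its limit,
+    // so it reports overwhelmed, while load2 is mostly idle; the exact threshold lives in
+    // ConnectionWorker.Load and is not restated here.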
+ Load load1 = ConnectionWorker.Load.create(60, 10, 100, 90, 100); + assertThat(load1.isOverwhelmed()).isTrue(); + + Load load2 = ConnectionWorker.Load.create(1, 1, 100, 100, 100); + assertThat(load2.isOverwhelmed()).isFalse(); + } + + @Test + void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws Exception { + ProtoSchema schema1 = createProtoSchema("foo"); + ConnectionWorker.setMaxInflightRequestWaitTime(Duration.ofSeconds(1)); + StreamWriter sw1 = + StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(schema1).build(); + ConnectionWorker connectionWorker = + new ConnectionWorker( + TEST_STREAM_1, + null, + createProtoSchema("foo"), + 100000, + 100000, + Duration.ofSeconds(100), + FlowController.LimitExceededBehavior.Block, + TEST_TRACE_ID, + null, + client.getSettings(), + retrySettings, + /* enableRequestProfiler= */ false, + /* enableOpenTelemetry= */ false, + /*isMultiplexing*/ false); + java.time.Duration durationSleep = java.time.Duration.ofSeconds(2); + testBigQueryWrite.setResponseSleep(durationSleep); + + long appendCount = 2; + for (int i = 0; i < appendCount; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + + // In total insert 'appendCount' requests, + List> futures = new ArrayList<>(); + for (int i = 0; i < appendCount; i++) { + futures.add( + sendTestMessage( + connectionWorker, sw1, createFooProtoRows(new String[] {String.valueOf(i)}), i)); + assertEquals(connectionWorker.getLoad().inFlightRequestsCount(), i + 1); + } + + for (int i = 0; i < appendCount; i++) { + int finalI = i; + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> futures.get(finalI).get().getAppendResult().getOffset().getValue()); + if (i == 0) { + assertThat(ex.getCause()).hasMessageThat().contains("Request has waited in inflight queue"); + } else { + assertThat(ex.getCause()).hasMessageThat().contains("Connection is aborted due to "); + } + } + + // The future append will directly fail. + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> + sendTestMessage( + connectionWorker, + sw1, + createFooProtoRows(new String[] {String.valueOf(100)}), + 100) + .get()); + assertThat(ex.getCause()).hasMessageThat().contains("Request has waited in inflight queue"); + + // Verify we can shutdown normally within the expected time. 
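+    // Sketch of the bound: at most appendCount (2) responses remain in flight, each delayed by
+    // durationSleep (2s), so close() is expected to return within roughly
+    // appendCount * durationSleep.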
+    long startCloseTime = System.currentTimeMillis();
+    connectionWorker.close();
+    long timeDiff = System.currentTimeMillis() - startCloseTime;
+    assertTrue(
+        timeDiff <= (appendCount * durationSleep.toMillis()),
+        "timeDiff: "
+            + timeDiff
+            + " is more than total durationSleep: "
+            + (appendCount * durationSleep.toMillis()));
+    assertTrue(connectionWorker.isUserClosed());
+  }
+
+  @Test
+  void testLongTimeIdleWontFail() throws Exception {
+    ProtoSchema schema1 = createProtoSchema("foo");
+    ConnectionWorker.setMaxInflightRequestWaitTime(Duration.ofSeconds(1));
+    StreamWriter sw1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(schema1).build();
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            TEST_STREAM_1,
+            null,
+            createProtoSchema("foo"),
+            100000,
+            100000,
+            Duration.ofSeconds(100),
+            FlowController.LimitExceededBehavior.Block,
+            TEST_TRACE_ID,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ false,
+            /*isMultiplexing*/ false);
+
+    long appendCount = 10;
+    for (int i = 0; i < appendCount * 2; i++) {
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+
+    // Insert the first batch of 10 requests.
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    for (int i = 0; i < appendCount; i++) {
+      futures.add(
+          sendTestMessage(
+              connectionWorker, sw1, createFooProtoRows(new String[] {String.valueOf(i)}), i));
+    }
+    // Sleep 2 seconds to make sure the request queue is empty.
+    Thread.sleep(2000);
+    assertEquals(connectionWorker.getLoad().inFlightRequestsCount(), 0);
+    for (int i = 0; i < appendCount; i++) {
+      futures.add(
+          sendTestMessage(
+              connectionWorker,
+              sw1,
+              createFooProtoRows(new String[] {String.valueOf(i)}),
+              i + appendCount));
+    }
+    for (int i = 0; i < appendCount * 2; i++) {
+      assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue());
+    }
+  }
+
+  private void exerciseOpenTelemetryAttributesWithStreamNames(String streamName, String expected)
+      throws Exception {
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            streamName,
+            null,
+            createProtoSchema("foo"),
+            100000,
+            100000,
+            Duration.ofSeconds(100),
+            FlowController.LimitExceededBehavior.Block,
+            null,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ true,
+            /*isMultiplexing*/ false);
+
+    Attributes attributes = connectionWorker.getTelemetryAttributes();
+    String attributesTableId = attributes.get(TelemetryMetrics.telemetryKeyTableId);
+    assertEquals(expected, attributesTableId);
+  }
+
+  @Test
+  void testOpenTelemetryAttributesWithStreamNames() throws Exception {
+    exerciseOpenTelemetryAttributesWithStreamNames(
+        "projects/my_project/datasets/my_dataset/tables/my_table/streams/my_stream",
+        "projects/my_project/datasets/my_dataset/tables/my_table");
+    exerciseOpenTelemetryAttributesWithStreamNames(
+        "projects/my_project/datasets/my_dataset/tables/my_table/",
+        "projects/my_project/datasets/my_dataset/tables/my_table");
+    exerciseOpenTelemetryAttributesWithStreamNames(
+        "projects/my_project/datasets/my_dataset/tables/", null);
+  }
+
+  void checkOpenTelemetryTraceIdAttribute(Attributes attributes, int index, String expected) {
+    String attributesTraceId = attributes.get(TelemetryMetrics.telemetryKeysTraceId.get(index));
+    assertEquals(expected, attributesTraceId);
+  }
+
+  void exerciseOpenTelemetryAttributesWithTraceId(
+      String traceId, String expectedField1, String expectedField2, String expectedField3)
+      throws Exception {
ConnectionWorker connectionWorker = + new ConnectionWorker( + TEST_STREAM_1, + null, + createProtoSchema("foo"), + 100000, + 100000, + Duration.ofSeconds(100), + FlowController.LimitExceededBehavior.Block, + traceId, + null, + client.getSettings(), + retrySettings, + /* enableRequestProfiler= */ false, + /* enableOpenTelemetry= */ true, + /*isMultiplexing*/ false); + + Attributes attributes = connectionWorker.getTelemetryAttributes(); + checkOpenTelemetryTraceIdAttribute(attributes, 0, expectedField1); + checkOpenTelemetryTraceIdAttribute(attributes, 1, expectedField2); + checkOpenTelemetryTraceIdAttribute(attributes, 2, expectedField3); + } + + @Test + void testOpenTelemetryAttributesWithTraceId() throws Exception { + exerciseOpenTelemetryAttributesWithTraceId(null, null, null, null); + exerciseOpenTelemetryAttributesWithTraceId("a:b:c", null, null, null); + exerciseOpenTelemetryAttributesWithTraceId( + "java-streamwriter:HEAD+20240508-1544" + + " Dataflow:monorail-c-multi:2024-05-08_11_44_34-6968230696879535523:1972585693681960752", + "monorail-c-multi", + "2024-05-08_11_44_34-6968230696879535523", + "1972585693681960752"); + exerciseOpenTelemetryAttributesWithTraceId( + "Dataflow:2024-04-26_23_19_08-12221961051154168466", + "2024-04-26_23_19_08-12221961051154168466", + null, + null); + exerciseOpenTelemetryAttributesWithTraceId( + "Dataflow:writeapi3:2024-04-03_03_49_33-845412829237675723:63737042897365355", + "writeapi3", + "2024-04-03_03_49_33-845412829237675723", + "63737042897365355"); + exerciseOpenTelemetryAttributesWithTraceId( + "java-streamwriter" + + " Dataflow:pubsub-to-bq-staging-tongruil-1024-static:2024-05-14_15_13_14-5530509399715326669:4531186922674871499", + "pubsub-to-bq-staging-tongruil-1024-static", + "2024-05-14_15_13_14-5530509399715326669", + "4531186922674871499"); + exerciseOpenTelemetryAttributesWithTraceId("a:b dataflow :c", null, null, null); + exerciseOpenTelemetryAttributesWithTraceId("a:b dataflow:c:d", "c", "d", null); + } + + @Test + void testDoubleDisconnectWithShorterRetryDuration() throws Exception { + // simulate server disconnect due to idle stream + testBigQueryWrite.setFailedStatus( + Status.ABORTED.withDescription( + "Closing the stream because it has been inactive for 600 seconds.")); + testBigQueryWrite.setCloseEveryNAppends(1); + testBigQueryWrite.setTimesToClose( + 2); // Total of 2 connection failures. The time interval between the processing of these + // failures will exceed the configured maxRetryDuration. 
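+    // Expected outcome (per the assertion below): the two disconnects are processed far enough
+    // apart that the retry window presumably restarts for each, so the single append still
+    // succeeds even though maxRetryDuration is only 1 ms.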
+    testBigQueryWrite.addResponse(createAppendResponse(0));
+
+    ProtoSchema schema1 = createProtoSchema("foo");
+    StreamWriter sw1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setLocation("us")
+            .setWriterSchema(schema1)
+            .build();
+    ConnectionWorker connectionWorker =
+        new ConnectionWorker(
+            TEST_STREAM_1,
+            "us",
+            createProtoSchema("foo"),
+            100000,
+            100000,
+            Duration.ofMillis(1), // very small maxRetryDuration
+            FlowController.LimitExceededBehavior.Block,
+            TEST_TRACE_ID,
+            null,
+            client.getSettings(),
+            retrySettings,
+            /* enableRequestProfiler= */ false,
+            /* enableOpenTelemetry= */ false,
+            /*isMultiplexing*/ false);
+
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    futures.add(
+        sendTestMessage(
+            connectionWorker, sw1, createFooProtoRows(new String[] {String.valueOf(0)}), 0));
+
+    assertEquals(0, futures.get(0).get().getAppendResult().getOffset().getValue());
+  }
+
+  @Test
+  void testLocationName() throws Exception {
+    assertEquals(
+        "projects/p1/locations/us", ConnectionWorker.getRoutingHeader(TEST_STREAM_1, "us"));
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java
new file mode 100644
index 000000000000..375949df2a95
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.protobuf.AbstractMessage;
+import io.grpc.ServerServiceDefinition;
+import io.grpc.Status;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.function.Supplier;
+
+/**
+ * A fake implementation of {@link MockGrpcService} that can be used to test clients of a
+ * StreamWriter. It forwards calls to the real implementation {@link FakeBigQueryWriteImpl}.
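+ *
+ * <p>Typical usage in these tests (see {@code ConnectionWorkerTest#setUp}): register the fake
+ * with a {@code MockServiceHelper}, queue one response per expected append via {@code
+ * addResponse}, and point a {@code BigQueryWriteClient} at the helper's channel provider.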
+ */ +public class FakeBigQueryWrite implements MockGrpcService { + private final FakeBigQueryWriteImpl serviceImpl; + + public FakeBigQueryWrite() { + serviceImpl = new FakeBigQueryWriteImpl(); + } + + @Override + public List getRequests() { + return new LinkedList(serviceImpl.getCapturedRequests()); + } + + void waitForResponseScheduled() throws InterruptedException { + serviceImpl.waitForResponseScheduled(); + } + + public List getAppendRequests() { + return serviceImpl.getCapturedRequests(); + } + + public List getWriteStreamRequests() { + return serviceImpl.getCapturedWriteRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + if (response instanceof AppendRowsResponse) { + serviceImpl.addResponse((AppendRowsResponse) response); + } else if (response instanceof WriteStream) { + serviceImpl.addWriteStreamResponse((WriteStream) response); + } else if (response instanceof FlushRowsResponse) { + serviceImpl.addFlushRowsResponse((FlushRowsResponse) response); + } else { + throw new IllegalStateException("Unsupported service"); + } + } + + /** + * Add a response supplier to end of list. This supplier can be used to simulate retries or other + * forms of behavior. + */ + void addResponse(Supplier response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addConnectionError(exception); + } + + void addStatusException(com.google.rpc.Status status) { + serviceImpl.addException(status); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } + + void setResponseSleep(Duration sleep) { + serviceImpl.setResponseSleep(sleep); + } + + void setCloseEveryNAppends(long closeAfter) { + serviceImpl.setCloseEveryNAppends(closeAfter); + } + + void setTimesToClose(long numberTimesToClose) { + serviceImpl.setTimesToClose(numberTimesToClose); + } + + void setCloseForeverAfter(long closeForeverAfter) { + serviceImpl.setCloseForeverAfter(closeForeverAfter); + } + + public long getConnectionCount() { + return serviceImpl.getConnectionCount(); + } + + void setExecutor(ScheduledExecutorService executor) { + serviceImpl.setExecutor(executor); + } + + void setFailedStatus(Status failedStatus) { + serviceImpl.setFailedStatus(failedStatus); + } + + void setReturnErrorDuringExclusiveStreamRetry(boolean retryOnError) { + serviceImpl.setReturnErrorDuringExclusiveStreamRetry(retryOnError); + } + + void setVerifyOffset(boolean verifyOffset) { + serviceImpl.setVerifyOffset(verifyOffset); + } + + public ArrayList getLatestRequestReceivedInstants() { + return serviceImpl.getLatestRequestReceivedInstants(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java new file mode 100644 index 000000000000..efb19fd7d092 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java @@ -0,0 +1,402 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.google.rpc.Code;
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+
+/**
+ * A fake implementation of {@link BigQueryWriteImplBase} that can act like a server in
+ * StreamWriter unit testing.
+ */
+class FakeBigQueryWriteImpl extends BigQueryWriteGrpc.BigQueryWriteImplBase {
+
+  private static final Logger LOG = Logger.getLogger(FakeBigQueryWriteImpl.class.getName());
+  private final List<Supplier<Response>> responses =
+      Collections.synchronizedList(new ArrayList<>());
+  private final LinkedBlockingQueue<AppendRowsRequest> requests = new LinkedBlockingQueue<>();
+  private final LinkedBlockingQueue<GetWriteStreamRequest> writeRequests =
+      new LinkedBlockingQueue<>();
+  private final LinkedBlockingQueue<FlushRowsRequest> flushRequests = new LinkedBlockingQueue<>();
+  private final LinkedBlockingQueue<Object> writeResponses = new LinkedBlockingQueue<>();
+  private final LinkedBlockingQueue<Object> flushResponses = new LinkedBlockingQueue<>();
+  private final AtomicInteger nextMessageId = new AtomicInteger(1);
+  private boolean autoPublishResponse;
+  private ScheduledExecutorService executor = null;
+
+  private Duration responseSleep = Duration.ZERO;
+  private Semaphore responseSemaphore = new Semaphore(0, true);
+
+  private long numberTimesToClose = 0;
+  private long closeAfter = 0;
+  private long recordCount = 0;
+  private long connectionCount = 0;
+  private long closeForeverAfter = 0;
+  private int responseIndex = 0;
+  private long expectedOffset = 0;
+  private boolean verifyOffset = false;
+  private boolean returnErrorDuringExclusiveStreamRetry = false;
+  private boolean returnErrorUntilRetrySuccess = false;
+  private Response retryResponse;
+  private long retryingOffset = -1;
+
+  // Record whether the first record has been seen on a connection.
+  private final Map<StreamObserver<AppendRowsResponse>, Boolean> connectionToFirstRequest =
+      new ConcurrentHashMap<>();
+  private Status failedStatus = Status.ABORTED;
+  private ArrayList<Instant> requestReceivedInstants = new ArrayList<>();
+
+  /** Class used to save the state of a possible response. */
+  public static class Response {
+
+    Optional<AppendRowsResponse> appendResponse;
+    Optional<Throwable> error;
+
+    public Response(AppendRowsResponse appendResponse) {
+      this.appendResponse = Optional.of(appendResponse);
+      this.error = Optional.absent();
+    }
+
+    public Response(Throwable exception) {
+      this.appendResponse = Optional.absent();
+      this.error = Optional.of(exception);
+    }
+
+    public AppendRowsResponse getResponse() {
+      return appendResponse.get();
+    }
+
+    public Throwable getError() {
+      return error.get();
+    }
+
+    boolean isError() {
+      return error.isPresent();
+    }
+
+    @Override
+    public String toString() {
+      if (isError()) {
+        return error.get().toString();
+      }
+      return appendResponse.get().toString();
+    }
+  }
+
+  public ArrayList<Instant> getLatestRequestReceivedInstants() {
+    return requestReceivedInstants;
+  }
+
+  @Override
+  public void getWriteStream(
+      GetWriteStreamRequest request, StreamObserver<WriteStream> responseObserver) {
+    Object response = writeResponses.remove();
+    if (response instanceof WriteStream) {
+      writeRequests.add(request);
+      responseObserver.onNext((WriteStream) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  @Override
+  public void flushRows(
+      FlushRowsRequest request, StreamObserver<FlushRowsResponse> responseObserver) {
+    Object response = flushResponses.remove();
+    if (response instanceof FlushRowsResponse) {
+      flushRequests.add(request);
+      responseObserver.onNext((FlushRowsResponse) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  void waitForResponseScheduled() throws InterruptedException {
+    responseSemaphore.acquire();
+  }
+
+  /* Return the number of times the stream was connected. */
+  public long getConnectionCount() {
+    return connectionCount;
+  }
+
+  void setFailedStatus(Status failedStatus) {
+    this.failedStatus = failedStatus;
+  }
+
+  private Response determineResponse(long offset) {
+    // The logic here checks to see if a retry is ongoing. The implication is that the
+    // offset that is being retried (retryingOffset) should lead to returning the same error
+    // over and over until a request eventually resolves, instead of calling get() on
+    // suppliers that, in the future, may be expected to trigger full retry loops.
+    Response response;
+    // Retry is in progress and the offset isn't the retrying offset; return the saved response.
+    if (returnErrorUntilRetrySuccess && offset != retryingOffset) {
+      response = retryResponse;
+    } else {
+      // We received the retryingOffset OR we aren't in retry mode; get the response as
+      // expected.
+      // In case of connection reset: normally each response will only be sent once. But, if the
+      // stream is aborted, the last few responses may not be received, and the client will
+      // request them again.
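+      // Worked example (hypothetical values): with responses = [error, ok] and
+      // returnErrorDuringExclusiveStreamRetry = true, offset 0 first returns the error and
+      // starts a retry cycle (retryingOffset = 0). Until offset 0 is retried, every other
+      // offset receives the same saved error; once offset 0 resolves without an error, the
+      // retry bookkeeping below is cleared.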
+ response = responses.get(Math.toIntExact(offset)).get(); + // If we are in retry mode and don't have an error, clear retry variables + if (returnErrorUntilRetrySuccess && !response.getResponse().hasError()) { + retryingOffset = -1; + retryResponse = null; + } + } + + returnErrorUntilRetrySuccess = + returnErrorDuringExclusiveStreamRetry && response.getResponse().hasError(); + // If this is a new retry cycle, set retry variables + if (retryingOffset == -1 && returnErrorUntilRetrySuccess) { + retryingOffset = offset; + retryResponse = response; + } + + return response; + } + + @Override + public StreamObserver appendRows( + final StreamObserver responseObserver) { + this.connectionCount++; + connectionToFirstRequest.put(responseObserver, true); + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(AppendRowsRequest value) { + requestReceivedInstants.add(Instant.now()); + recordCount++; + requests.add(value); + long offset = value.getOffset().getValue(); + if (offset == -1 || !value.hasOffset()) { + offset = responseIndex; + } + responseIndex++; + if (responseSleep.compareTo(Duration.ZERO) > 0) { + LOG.info("Sleeping before response for " + responseSleep.toString()); + Uninterruptibles.sleepUninterruptibly( + responseSleep.toMillis(), TimeUnit.MILLISECONDS); + } + if (connectionToFirstRequest.get(responseObserver)) { + if (!(value.getProtoRows().hasWriterSchema() + || value.getArrowRows().hasWriterSchema()) + || value.getWriteStream().isEmpty()) { + LOG.info( + String.valueOf( + !value.getProtoRows().hasWriterSchema() + || value.getWriteStream().isEmpty())); + responseObserver.onError( + Status.INVALID_ARGUMENT + .withDescription("Unexpected first request: " + value.toString()) + .asException()); + return; + } + } + connectionToFirstRequest.put(responseObserver, false); + if (closeAfter > 0 + && responseIndex % closeAfter == 0 + && recordCount % closeAfter == 0 + && (numberTimesToClose == 0 || connectionCount <= numberTimesToClose)) { + LOG.info("Shutting down connection from test..."); + responseObserver.onError(failedStatus.asException()); + } else if (closeForeverAfter > 0 && recordCount > closeForeverAfter) { + LOG.info("Shutting down connection from test..."); + responseObserver.onError(failedStatus.asException()); + } else { + Response response = determineResponse(offset); + if (verifyOffset + && !response.getResponse().hasError() + && response.getResponse().getAppendResult().getOffset().getValue() > -1) { + // No error and offset is present; verify order + if (response.getResponse().getAppendResult().getOffset().getValue() + != expectedOffset) { + com.google.rpc.Status status = + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL_VALUE).build(); + response = new Response(AppendRowsResponse.newBuilder().setError(status).build()); + } else { + LOG.info( + String.format( + "asserted offset: %s expected: %s", + response.getResponse().getAppendResult().getOffset().getValue(), + expectedOffset)); + LOG.info(String.format("sending response: %s", response.getResponse())); + expectedOffset++; + } + } + sendResponse(response, responseObserver); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + private void sendResponse( + Response response, StreamObserver responseObserver) { + if (response.isError()) { + responseObserver.onError(response.getError()); + } else { + 
+      responseObserver.onNext(response.getResponse());
+    }
+  }
+
+  /** Set an executor to use to delay publishing responses. */
+  public FakeBigQueryWriteImpl setExecutor(ScheduledExecutorService executor) {
+    this.executor = executor;
+    return this;
+  }
+
+  /** Set an amount of time by which to sleep before publishing responses. */
+  public FakeBigQueryWriteImpl setResponseSleep(Duration responseSleep) {
+    this.responseSleep = responseSleep;
+    return this;
+  }
+
+  /**
+   * Add a response to the end of the list. A response can be either a record or an exception.
+   * All responses must be set up before any rows are appended.
+   */
+  void addResponse(AppendRowsResponse appendRowsResponse) {
+    responses.add(() -> new Response(appendRowsResponse));
+  }
+
+  /**
+   * Add a response supplier to the end of the list. This supplier can be used to simulate
+   * retries or other forms of behavior.
+   */
+  void addResponse(Supplier<Response> response) {
+    responses.add(response);
+  }
+
+  public FakeBigQueryWriteImpl addWriteStreamResponse(WriteStream response) {
+    writeResponses.add(response);
+    return this;
+  }
+
+  public FakeBigQueryWriteImpl addFlushRowsResponse(FlushRowsResponse response) {
+    flushResponses.add(response);
+    return this;
+  }
+
+  public FakeBigQueryWriteImpl addConnectionError(Throwable error) {
+    responses.add(() -> new Response(error));
+    return this;
+  }
+
+  /**
+   * Returns the given status instead of a valid response. This should be treated as an exception
+   * on the other side. This will not stop processing.
+   */
+  void addException(com.google.rpc.Status status) {
+    responses.add(() -> new Response(AppendRowsResponse.newBuilder().setError(status).build()));
+  }
+
+  /**
+   * Returns the given status instead of a valid response. This should NOT be used to return a
+   * retriable error (as that would cause an infinite loop).
+   */
+  void addNonRetriableError(com.google.rpc.Status status) {
+    responses.add(() -> new Response(AppendRowsResponse.newBuilder().setError(status).build()));
+  }
+
+  void setVerifyOffset(boolean verifyOffset) {
+    this.verifyOffset = verifyOffset;
+  }
+
+  void setReturnErrorDuringExclusiveStreamRetry(boolean retryOnError) {
+    this.returnErrorDuringExclusiveStreamRetry = retryOnError;
+  }
+
+  public List<AppendRowsRequest> getCapturedRequests() {
+    return new ArrayList<>(requests);
+  }
+
+  public List<GetWriteStreamRequest> getCapturedWriteRequests() {
+    return new ArrayList<>(writeRequests);
+  }
+
+  void reset() {
+    requests.clear();
+    responses.clear();
+  }
+
+  /* Abort the stream after N records. The primary use case is to test the retry logic. After N
+   * records are sent, the stream will be aborted with Code.ABORTED. This is a retriable error.
+   * The abort will call the onDone callback immediately, and thus potentially lose some messages
+   * that have already been sent. If the value of closeAfter is too small, the client might not
+   * get a chance to process any records before a subsequent abort is sent, which means multiple
+   * retries in a row on the client side. After 3 retries in a row the write will fail.
+   * closeAfter should be large enough to give the client some opportunity to receive some of the
+   * messages.
+   **/
+  void setCloseEveryNAppends(long closeAfter) {
+    this.closeAfter = closeAfter;
+  }
+
+  /* If setCloseEveryNAppends is greater than 0, then the stream will be aborted every N appends.
+   * setTimesToClose will limit the number of times to do the abort. If it is set to 0, it will
+   * abort every N appends.
+   * The primary use case is: send a couple of records, then abort.
+   * But if there are only a couple of records, it is possible these two records are sent, then
+   * the abort happens before those two records are processed by the client, requiring them to
+   * be sent again, and thus a potential infinite loop. Therefore set the times to close to 1.
+   * This will send the two records, force an abort and retry, and then reprocess the records to
+   * completion.
+   **/
+  void setTimesToClose(long numberTimesToClose) {
+    this.numberTimesToClose = numberTimesToClose;
+  }
+
+  /* The connection will forever return failure after closeForeverAfter records. This option
+   * shouldn't be used together with setCloseEveryNAppends and setTimesToClose. */
+  void setCloseForeverAfter(long closeForeverAfter) {
+    this.closeForeverAfter = closeForeverAfter;
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java
new file mode 100644
index 000000000000..52a5eee7ce69
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.core.ApiClock;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/** A Clock to help with testing time-based logic. */
+public class FakeClock implements ApiClock {
+
+  private final AtomicLong millis = new AtomicLong();
+
+  /** Advances the clock value by {@code time} in {@code timeUnit}. */
+  void advance(long time, TimeUnit timeUnit) {
+    millis.addAndGet(timeUnit.toMillis(time));
+  }
+
+  @Override
+  public long nanoTime() {
+    return millisTime() * 1000_000L;
+  }
+
+  @Override
+  public long millisTime() {
+    return millis.get();
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java
new file mode 100644
index 000000000000..16efa2c73d91
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1;
+
+import com.google.api.core.ApiClock;
+import com.google.common.primitives.Ints;
+import com.google.common.util.concurrent.SettableFuture;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Logger;
+
+/**
+ * Fake implementation of {@link ScheduledExecutorService} that allows tests to control the
+ * reference time of the executor and decide when to execute any outstanding task.
+ */
+public class FakeScheduledExecutorService extends AbstractExecutorService
+    implements ScheduledExecutorService {
+  private static final Logger LOG = Logger.getLogger(FakeScheduledExecutorService.class.getName());
+
+  private final AtomicBoolean shutdown = new AtomicBoolean(false);
+  private final PriorityQueue<PendingCallable<?>> pendingCallables = new PriorityQueue<>();
+  private final FakeClock clock = new FakeClock();
+  private final Deque<Duration> expectedWorkQueue = new LinkedList<>();
+
+  public ApiClock getClock() {
+    return clock;
+  }
+
+  @Override
+  public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
+    return schedulePendingCallable(
+        new PendingCallable<>(
+            Duration.ofMillis(unit.toMillis(delay)), command, PendingCallableType.NORMAL));
+  }
+
+  @Override
+  public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
+    return schedulePendingCallable(
+        new PendingCallable<>(
+            Duration.ofMillis(unit.toMillis(delay)), callable, PendingCallableType.NORMAL));
+  }
+
+  @Override
+  public ScheduledFuture<?> scheduleAtFixedRate(
+      Runnable command, long initialDelay, long period, TimeUnit unit) {
+    return schedulePendingCallable(
+        new PendingCallable<>(
+            Duration.ofMillis(unit.toMillis(initialDelay)),
+            command,
+            PendingCallableType.FIXED_RATE));
+  }
+
+  @Override
+  public ScheduledFuture<?> scheduleWithFixedDelay(
+      Runnable command, long initialDelay, long delay, TimeUnit unit) {
+    return schedulePendingCallable(
+        new PendingCallable<>(
+            Duration.ofMillis(unit.toMillis(initialDelay)),
+            command,
+            PendingCallableType.FIXED_DELAY));
+  }
+
+  /**
+   * This will advance the reference time of the executor and execute (in the same thread) any
+   * outstanding callables whose execution time has passed.
+   */
+  void advanceTime(Duration toAdvance) {
+    LOG.info(
+        "Advancing time to: "
+            + Instant.ofEpochMilli(clock.millisTime() + toAdvance.toMillis()).toString());
+    clock.advance(toAdvance.toMillis(), TimeUnit.MILLISECONDS);
+    work();
+  }
+
+  private void work() {
+    for (; ; ) {
+      PendingCallable<?> callable = null;
+      Instant cmpTime = Instant.ofEpochMilli(clock.millisTime());
+      if (!pendingCallables.isEmpty()) {
+        LOG.info(
+            "Going to call: Current time: "
+                + cmpTime.toString()
+                + " Scheduled time: "
+                + pendingCallables.peek().getScheduledTime().toString()
+                + " Creation time: "
+                + pendingCallables.peek().getCreationTime().toString());
+      }
+      synchronized (pendingCallables) {
+        if (pendingCallables.isEmpty()
+            || pendingCallables.peek().getScheduledTime().isAfter(cmpTime)) {
+          break;
+        }
+        callable = pendingCallables.poll();
+      }
+      if (callable != null) {
+        try {
+          callable.call();
+        } catch (Exception e) {
+          // Any exception from the callable is set on its future and is not relevant to
+          // advanceTime, so it is ignored here.
+        }
+      }
+    }
+
+    synchronized (pendingCallables) {
+      if (shutdown.get() && pendingCallables.isEmpty()) {
+        pendingCallables.notifyAll();
+      }
+    }
+  }
+
+  @Override
+  public void shutdown() {
+    if (shutdown.getAndSet(true)) {
+      throw new IllegalStateException("This executor has been shutdown already");
+    }
+  }
+
+  @Override
+  public List<Runnable> shutdownNow() {
+    if (shutdown.getAndSet(true)) {
+      throw new IllegalStateException("This executor has been shutdown already");
+    }
+    List<Runnable> pending = new ArrayList<>();
+    for (final PendingCallable<?> pendingCallable : pendingCallables) {
+      pending.add(
+          new Runnable() {
+            @Override
+            public void run() {
+              pendingCallable.call();
+            }
+          });
+    }
+    synchronized (pendingCallables) {
+      pendingCallables.notifyAll();
+      pendingCallables.clear();
+    }
+    return pending;
+  }
+
+  @Override
+  public boolean isShutdown() {
+    return shutdown.get();
+  }
+
+  @Override
+  public boolean isTerminated() {
+    return pendingCallables.isEmpty();
+  }
+
+  @Override
+  public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
+    synchronized (pendingCallables) {
+      if (pendingCallables.isEmpty()) {
+        return true;
+      }
+      LOG.info("Waiting on pending callables: " + pendingCallables.size());
+      pendingCallables.wait(unit.toMillis(timeout));
+      return pendingCallables.isEmpty();
+    }
+  }
+
+  @Override
+  public void execute(Runnable command) {
+    if (shutdown.get()) {
+      throw new IllegalStateException("This executor has been shutdown");
+    }
+    command.run();
+  }
+
+  <V> ScheduledFuture<V> schedulePendingCallable(PendingCallable<V> callable) {
+    LOG.info(
+        "Schedule pending callable called " + callable.delay + " " + callable.getScheduledTime());
+    if (shutdown.get()) {
+      throw new IllegalStateException("This executor has been shutdown");
+    }
+    synchronized (pendingCallables) {
+      pendingCallables.add(callable);
+    }
+    work();
+    synchronized (expectedWorkQueue) {
+      // We compare by the callable delay in order to decide when to remove expectations from the
+      // expected work queue, i.e. only the expected work that matches the delay of the scheduled
+      // callable is removed from the queue.
+      if (!expectedWorkQueue.isEmpty() && expectedWorkQueue.peek().equals(callable.delay)) {
+        expectedWorkQueue.poll();
+      }
+      expectedWorkQueue.notifyAll();
+    }
+
+    return callable.getScheduledFuture();
+  }
+
+  enum PendingCallableType {
+    NORMAL,
+    FIXED_RATE,
+    FIXED_DELAY
+  }
+
+  /** Class that saves the state of a scheduled pending callable. */
+  class PendingCallable<T> implements Comparable<PendingCallable<T>> {
+    Instant creationTime = Instant.ofEpochMilli(clock.millisTime());
+    Duration delay;
+    Callable<T> pendingCallable;
+    SettableFuture<T> future = SettableFuture.create();
+    AtomicBoolean cancelled = new AtomicBoolean(false);
+    AtomicBoolean done = new AtomicBoolean(false);
+    PendingCallableType type;
+
+    PendingCallable(Duration delay, final Runnable runnable, PendingCallableType type) {
+      pendingCallable =
+          new Callable<T>() {
+            @Override
+            public T call() {
+              runnable.run();
+              return null;
+            }
+          };
+      this.type = type;
+      this.delay = delay;
+    }
+
+    PendingCallable(Duration delay, Callable<T> callable, PendingCallableType type) {
+      pendingCallable = callable;
+      this.type = type;
+      this.delay = delay;
+    }
+
+    private Instant getScheduledTime() {
+      return creationTime.plus(delay);
+    }
+
+    private Instant getCreationTime() {
+      return creationTime;
+    }
+
+    ScheduledFuture<T> getScheduledFuture() {
+      return new ScheduledFuture<T>() {
+        @Override
+        public long getDelay(TimeUnit unit) {
+          return unit.convert(
+              getScheduledTime().toEpochMilli() - clock.millisTime(), TimeUnit.MILLISECONDS);
+        }
+
+        @Override
+        public int compareTo(Delayed o) {
+          return Ints.saturatedCast(
+              getDelay(TimeUnit.MILLISECONDS) - o.getDelay(TimeUnit.MILLISECONDS));
+        }
+
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+          synchronized (this) {
+            cancelled.set(true);
+            return !done.get();
+          }
+        }
+
+        @Override
+        public boolean isCancelled() {
+          return cancelled.get();
+        }
+
+        @Override
+        public boolean isDone() {
+          return done.get();
+        }
+
+        @Override
+        public T get() throws InterruptedException, ExecutionException {
+          return future.get();
+        }
+
+        @Override
+        public T get(long timeout, TimeUnit unit)
+            throws InterruptedException, ExecutionException, TimeoutException {
+          return future.get(timeout, unit);
+        }
+      };
+    }
+
+    T call() {
+      T result = null;
+      synchronized (this) {
+        if (cancelled.get()) {
+          return null;
+        }
+        try {
+          result = pendingCallable.call();
+          future.set(result);
+        } catch (Exception e) {
+          future.setException(e);
+        } finally {
+          switch (type) {
+            case NORMAL:
+              done.set(true);
+              break;
+            case FIXED_DELAY:
+              this.creationTime = Instant.ofEpochMilli(clock.millisTime());
+              schedulePendingCallable(this);
+              break;
+            case FIXED_RATE:
+              this.creationTime = this.creationTime.plus(delay);
+              schedulePendingCallable(this);
+              break;
+            default:
+              // Nothing to do
+          }
+        }
+      }
+      return result;
+    }
+
+    @Override
+    public int compareTo(PendingCallable<T> other) {
+      return getScheduledTime().compareTo(other.getScheduledTime());
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java
new file mode 100644
index 000000000000..fa55a2bc9548
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java
@@ -0,0 +1,1683 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.client.util.Sleeper; +import com.google.api.core.ApiFuture; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.batching.FlowController; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.cloud.bigquery.storage.test.JsonTest; +import com.google.cloud.bigquery.storage.test.SchemaTest; +import com.google.cloud.bigquery.storage.test.Test.FlexibleType; +import com.google.cloud.bigquery.storage.test.Test.FooType; +import com.google.cloud.bigquery.storage.test.Test.RepetitionType; +import com.google.cloud.bigquery.storage.test.Test.UpdatedFooType; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation; +import com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool.Settings; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode; +import com.google.common.collect.ImmutableMap; +import com.google.gson.JsonArray; +import com.google.gson.JsonObject; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Timestamp; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.time.Instant; +import java.time.LocalTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class JsonStreamWriterTest { + + private static final int NUMERIC_SCALE = 9; + private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/_default"; + private static final String TEST_STREAM_2 = "projects/p/datasets/d2/tables/t2/streams/_default"; + private static final String TEST_TABLE = "projects/p/datasets/d/tables/t"; + private static final String TEST_TABLE_DEFAULT = "projects/p/datasets/d/tables/t/_default"; + private static LocalChannelProvider channelProvider; + private FakeScheduledExecutorService fakeExecutor; + 
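+  // FakeScheduledExecutorService lets a test advance virtual time deterministically instead of
+  // sleeping; for example, fakeExecutor.advanceTime(Duration.ofSeconds(5)) runs, inline, every
+  // task scheduled to fire within the next five seconds (see advanceTime above).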
private FakeBigQueryWrite testBigQueryWrite; + private static MockServiceHelper serviceHelper; + private BigQueryWriteClient client; + + private final TableFieldSchema FOO = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("foo") + .build(); + private final TableFieldSchema BAR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("bar") + .build(); + private final TableFieldSchema BAZ = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("baz") + .build(); + + private final TableSchema TABLE_SCHEMA = TableSchema.newBuilder().addFields(0, FOO).build(); + private final TableSchema TABLE_SCHEMA_2 = TableSchema.newBuilder().addFields(0, BAZ).build(); + + private final TableSchema UPDATED_TABLE_SCHEMA = + TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).build(); + private final TableSchema UPDATED_TABLE_SCHEMA_2 = + TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).addFields(2, BAZ).build(); + private final ProtoSchema PROTO_SCHEMA = + ProtoSchemaConverter.convert( + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(TABLE_SCHEMA)); + private final ProtoSchema PROTO_SCHEMA_2 = + ProtoSchemaConverter.convert( + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(TABLE_SCHEMA_2)); + private final ProtoSchema UPDATED_PROTO_SCHEMA = + ProtoSchemaConverter.convert( + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor( + UPDATED_TABLE_SCHEMA)); + + private final TableFieldSchema TEST_INT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_int") + .build(); + private final TableFieldSchema TEST_STRING = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_string") + .build(); + + JsonStreamWriterTest() throws DescriptorValidationException {} + + @BeforeEach + void setUp() throws Exception { + testBigQueryWrite = new FakeBigQueryWrite(); + serviceHelper = + new MockServiceHelper(UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite)); + serviceHelper.start(); + channelProvider = serviceHelper.createChannelProvider(); + fakeExecutor = new FakeScheduledExecutorService(); + testBigQueryWrite.setExecutor(fakeExecutor); + BigQueryWriteSettings settings = + BigQueryWriteSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BigQueryWriteClient.create(settings); + Instant time = Instant.now(); + Timestamp timestamp = + Timestamp.newBuilder().setSeconds(time.getEpochSecond()).setNanos(time.getNano()).build(); + StreamWriter.cleanUp(); + } + + @AfterEach + void tearDown() throws Exception { + serviceHelper.stop(); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + private JsonStreamWriter.Builder getTestJsonStreamWriterBuilder( + String testStream, TableSchema BQTableSchema) { + return JsonStreamWriter.newBuilder(testStream, BQTableSchema, client) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build()); + } + + private JsonStreamWriter.Builder getTestJsonStreamWriterBuilder(String testStream) { + return 
JsonStreamWriter.newBuilder(testStream, client) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build()); + } + + @Test + void testTwoParamNewBuilder_nullSchema() { + NullPointerException e = + assertThrows( + NullPointerException.class, () -> getTestJsonStreamWriterBuilder(null, TABLE_SCHEMA)); + assertEquals(e.getMessage(), "StreamOrTableName is null."); + } + + @Test + void testTwoParamNewBuilder_nullStream() { + NullPointerException e = + assertThrows( + NullPointerException.class, () -> getTestJsonStreamWriterBuilder(TEST_STREAM, null)); + assertEquals(e.getMessage(), "TableSchema is null."); + } + + @Test + void testTwoParamNewBuilder() + throws DescriptorValidationException, IOException, InterruptedException { + JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); + assertEquals(TEST_STREAM, writer.getStreamName()); + } + + @Test + void testConstructWriterUsingDefaultStreamName() + throws DescriptorValidationException, IOException, InterruptedException { + JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_TABLE_DEFAULT, TABLE_SCHEMA).build(); + assertEquals(TEST_TABLE_DEFAULT, writer.getStreamName()); + } + + @Test + void testSingleAppendSimpleJson() throws Exception { + FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); + JSONObject foo = new JSONObject(); + foo.put("foo", "allen"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) + .setTraceId("test:empty") + .build()) { + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + expectedProto.toByteString()); + assertEquals( + "java-jsonwriter test:empty", testBigQueryWrite.getAppendRequests().get(0).getTraceId()); + } + } + + @Test + void testSingleAppendSimpleGson() throws Exception { + FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); + JsonObject foo = new JsonObject(); + foo.addProperty("foo", "allen"); + JsonArray jsonArr = new JsonArray(); + jsonArr.add(foo); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) + .setTraceId("test:empty") + .build()) { + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + expectedProto.toByteString()); + assertEquals( + "java-jsonwriter test:empty", 
testBigQueryWrite.getAppendRequests().get(0).getTraceId()); + } + } + + @Test + void testFlexibleColumnAppend() throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test-列") + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(0, field).build(); + FlexibleType expectedProto = FlexibleType.newBuilder().setColDGVzdC3LiJc("allen").build(); + JSONObject flexible = new JSONObject(); + flexible.put("test-列", "allen"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(flexible); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, tableSchema).build()) { + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + expectedProto.toByteString()); + assertEquals("java-jsonwriter", testBigQueryWrite.getAppendRequests().get(0).getTraceId()); + } + } + + @Test + void testSpecialTypeAppend() throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("time") + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.REPEATED) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + + JsonTest.TestTime expectedProto = + JsonTest.TestTime.newBuilder() + .addTime(CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 0, 1))) + .build(); + JSONObject foo = new JSONObject(); + foo.put("time", new JSONArray(new String[] {"01:00:01"})); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, tableSchema).build()) { + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + expectedProto.toByteString()); + } + } + + @Test + void testRepeatedByteStringAppend() throws Exception { + TableFieldSchema NON_REPEATED_A = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("a") + .build(); + + TableFieldSchema NON_REPEATED_B = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("b") + .build(); + + TableFieldSchema NON_REPEATED_C = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("c") + .build(); + + TableFieldSchema REPEATED_A = + TableFieldSchema.newBuilder() + 
.setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("aa") + .build(); + + TableFieldSchema REPEATED_B = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("bb") + .build(); + + TableFieldSchema REPEATED_C = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("cc") + .build(); + + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, NON_REPEATED_A) + .addFields(1, NON_REPEATED_B) + .addFields(2, NON_REPEATED_C) + .addFields(3, REPEATED_A) + .addFields(4, REPEATED_B) + .addFields(5, REPEATED_C) + .build(); + + BigDecimal bigDecimal1 = new BigDecimal(1.1); + if (bigDecimal1.scale() > NUMERIC_SCALE) { + bigDecimal1 = bigDecimal1.setScale(NUMERIC_SCALE, RoundingMode.HALF_UP); + } + BigDecimal bigDecimal2 = new BigDecimal(2.2); + if (bigDecimal2.scale() > NUMERIC_SCALE) { + bigDecimal2 = bigDecimal2.setScale(NUMERIC_SCALE, RoundingMode.HALF_UP); + } + JSONArray aaValue = new JSONArray(); + aaValue.put(BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal1)); + aaValue.put(BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal2)); + + byte[] byteArray1 = "bb1".getBytes("UTF-8"); + byte[] byteArray2 = "bb2".getBytes("UTF-8"); + JSONArray bbValue = new JSONArray(); + bbValue.put(ByteString.copyFrom(byteArray1)); + bbValue.put(ByteString.copyFrom(byteArray2)); + + ByteString byteString1 = ByteString.copyFrom("cc1", "UTF-8"); + ByteString byteString2 = ByteString.copyFrom("cc2", "UTF-8"); + JSONArray ccValue = new JSONArray(); + ccValue.put(byteString1); + ccValue.put(byteString2); + + JSONObject foo = new JSONObject(); + foo.put("a", BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal1)); + foo.put("b", ByteString.copyFrom(byteArray1)); + foo.put("c", byteString1); + foo.put("aa", aaValue); + foo.put("bb", bbValue); + foo.put("cc", ccValue); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + RepetitionType expectedProto = + RepetitionType.newBuilder() + .setA(BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal1)) + .setB(ByteString.copyFrom(byteArray1)) + .setC(byteString1) + .addAa(BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal1)) + .addAa(BigDecimalByteStringEncoder.encodeToNumericByteString(bigDecimal2)) + .addBb(ByteString.copyFrom(byteArray1)) + .addBb(ByteString.copyFrom(byteArray2)) + .addCc(byteString1) + .addCc(byteString2) + .build(); + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, tableSchema).build()) { + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + expectedProto.toByteString()); + } + } + + @Test + void testSingleAppendMultipleSimpleJson() throws Exception { + FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); + JSONObject foo = new JSONObject(); + foo.put("foo", "allen"); + JSONObject foo1 = new JSONObject(); + 
foo1.put("foo", "allen"); + JSONObject foo2 = new JSONObject(); + foo2.put("foo", "allen"); + JSONObject foo3 = new JSONObject(); + foo3.put("foo", "allen"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + jsonArr.put(foo1); + jsonArr.put(foo2); + jsonArr.put(foo3); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 4, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals("java-jsonwriter", testBigQueryWrite.getAppendRequests().get(0).getTraceId()); + for (int i = 0; i < 4; i++) { + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(i), + expectedProto.toByteString()); + } + assertEquals( + testBigQueryWrite.getAppendRequests().get(0).getDefaultMissingValueInterpretation(), + MissingValueInterpretation.MISSING_VALUE_INTERPRETATION_UNSPECIFIED); + } + } + + @Test + void testMultipleAppendSimpleJson() throws Exception { + FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); + JSONObject foo = new JSONObject(); + foo.put("foo", "allen"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(1)).build()) + .build()); + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(2)).build()) + .build()); + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(3)).build()) + .build()); + ApiFuture appendFuture; + for (int i = 0; i < 4; i++) { + appendFuture = writer.append(jsonArr); + assertEquals((long) i, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(i) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(i) + .getProtoRows() + .getRows() + .getSerializedRows(0), + expectedProto.toByteString()); + } + } + } + + @Test + void testAppendOutOfRangeException() throws Exception { + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setError(com.google.rpc.Status.newBuilder().setCode(11).build()) + .build()); + JSONObject foo = new JSONObject(); + foo.put("foo", "allen"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + ApiFuture appendFuture = writer.append(jsonArr); + ExecutionException ex = assertThrows(ExecutionException.class, () -> appendFuture.get()); + 
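+      // Code 11 in the canned status above is google.rpc's OUT_OF_RANGE, which the client is
+      // expected to surface as the cause of the ExecutionException checked below.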
assertEquals(ex.getCause().getMessage(), "OUT_OF_RANGE: "); + } + } + + @Test + void testCreateDefaultStream_withNoSchemaPassedIn() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setLocation("aa") + .setTableSchema(tableSchema) + .build()); + try (JsonStreamWriter writer = + JsonStreamWriter.newBuilder(TEST_TABLE, client) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build()) + .build()) { + assertEquals("projects/p/datasets/d/tables/t/_default", writer.getStreamName()); + assertEquals("aa", writer.getLocation()); + } + } + + @Test + void testCreateDefaultStream_withNoClientPassedIn() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setLocation("aa") + .setTableSchema(tableSchema) + .build()); + try (JsonStreamWriter writer = + JsonStreamWriter.newBuilder(TEST_TABLE, tableSchema) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build()) + .setEnableConnectionPool(true) + .build()) { + assertEquals("projects/p/datasets/d/tables/t/_default", writer.getStreamName()); + assertEquals("aa", writer.getLocation()); + + JsonStreamWriter recreate = + JsonStreamWriter.newBuilder(writer.getStreamName(), tableSchema) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build()) + .setEnableConnectionPool(true) + .build(); + } + } + + @Test + void testCreateDefaultStreamWrongLocation() { + TableSchema tableSchema = + TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setLocation("aa") + .setTableSchema(tableSchema) + .build()); + IllegalArgumentException ex = + assertThrows( + IllegalArgumentException.class, + () -> { + JsonStreamWriter.newBuilder(TEST_TABLE, client) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setLocation("bb") + .build(); + }); + assertEquals("Specified location bb does not match the system value aa", ex.getMessage()); + } + + @Test + void testSimpleSchemaUpdate() throws Exception { + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + try (JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM).build()) { + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .setUpdatedSchema(UPDATED_TABLE_SCHEMA) + .build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + testBigQueryWrite.addResponse(createAppendResponse(2)); + testBigQueryWrite.addResponse(createAppendResponse(3)); + // First append + JSONObject foo = new JSONObject(); + foo.put("foo", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + ApiFuture appendFuture1 = writer.append(jsonArr); + ApiFuture appendFuture2 = 
writer.append(jsonArr); + ApiFuture appendFuture3 = writer.append(jsonArr); + + assertEquals(0L, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1L, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + FooType.newBuilder().setFoo("aaa").build().toByteString()); + + assertEquals(2L, appendFuture3.get().getAppendResult().getOffset().getValue()); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(1) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(1) + .getProtoRows() + .getRows() + .getSerializedRows(0), + FooType.newBuilder().setFoo("aaa").build().toByteString()); + + // Second append with updated schema. + JSONObject updatedFoo = new JSONObject(); + updatedFoo.put("foo", "aaa"); + updatedFoo.put("bar", "bbb"); + JSONArray updatedJsonArr = new JSONArray(); + updatedJsonArr.put(updatedFoo); + ApiFuture appendFuture4 = writer.append(updatedJsonArr); + + assertEquals(3L, appendFuture4.get().getAppendResult().getOffset().getValue()); + assertEquals(4, testBigQueryWrite.getAppendRequests().size()); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(3) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(3) + .getProtoRows() + .getRows() + .getSerializedRows(0), + UpdatedFooType.newBuilder().setFoo("aaa").setBar("bbb").build().toByteString()); + + assertTrue(testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); + assertTrue( + testBigQueryWrite.getAppendRequests().get(2).getProtoRows().hasWriterSchema() + || testBigQueryWrite.getAppendRequests().get(3).getProtoRows().hasWriterSchema()); + } + } + + @Test + void testSimpleSchemaUpdate_skipRefreshWriterIfSchemaProvided() throws Exception { + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .setUpdatedSchema(UPDATED_TABLE_SCHEMA) + .build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + testBigQueryWrite.addResponse(createAppendResponse(2)); + testBigQueryWrite.addResponse(createAppendResponse(3)); + // First append + JSONObject foo = new JSONObject(); + foo.put("foo", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + ApiFuture appendFuture1 = writer.append(jsonArr); + ApiFuture appendFuture2 = writer.append(jsonArr); + ApiFuture appendFuture3 = writer.append(jsonArr); + + assertEquals(0L, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1L, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(0) + .getProtoRows() + .getRows() + .getSerializedRows(0), + 
FooType.newBuilder().setFoo("aaa").build().toByteString()); + + assertEquals(2L, appendFuture3.get().getAppendResult().getOffset().getValue()); + assertEquals( + 1, + testBigQueryWrite + .getAppendRequests() + .get(1) + .getProtoRows() + .getRows() + .getSerializedRowsCount()); + assertEquals( + testBigQueryWrite + .getAppendRequests() + .get(1) + .getProtoRows() + .getRows() + .getSerializedRows(0), + FooType.newBuilder().setFoo("aaa").build().toByteString()); + + // Second append with updated schema. + JSONObject updatedFoo = new JSONObject(); + updatedFoo.put("foo", "aaa"); + updatedFoo.put("bar", "bbb"); + JSONArray updatedJsonArr = new JSONArray(); + updatedJsonArr.put(updatedFoo); + + // Schema update will not happen for writer that has schema explicitly provided. + assertThrows( + AppendSerializationError.class, + () -> { + ApiFuture appendFuture4 = writer.append(updatedJsonArr); + }); + } + } + + @Test + void testSimpleSchemaUpdate_withInterpretationMap() throws Exception { + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + Map missingValueMap = new HashMap<>(); + missingValueMap.put("col1", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE); + missingValueMap.put("col3", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM) + .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE) + .setMissingValueInterpretationMap(missingValueMap) + .build()) { + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .setUpdatedSchema(UPDATED_TABLE_SCHEMA) + .build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + // Verify the map before the writer is refreshed + assertEquals(missingValueMap, writer.getMissingValueInterpretationMap()); + testBigQueryWrite.addResponse(createAppendResponse(2)); + testBigQueryWrite.addResponse(createAppendResponse(3)); + + // First batch of appends. First append request will return an updated-schema, but the second + // and maybe the third append will be processed before the first response will refresh the + // StreamWriter. + JSONObject foo = new JSONObject(); + foo.put("foo", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + ApiFuture appendFuture1 = writer.append(jsonArr); + ApiFuture appendFuture2 = writer.append(jsonArr); + ApiFuture appendFuture3 = writer.append(jsonArr); + + assertEquals(0L, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1L, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals(2L, appendFuture3.get().getAppendResult().getOffset().getValue()); + + // Another append, this time with columns to match the updated schema. 
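+      // By this point the writer should have absorbed UPDATED_TABLE_SCHEMA from the first
+      // response's updated_schema field, so the extra "bar" column is expected to serialize.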
+      JSONObject updatedFoo = new JSONObject();
+      updatedFoo.put("foo", "aaa");
+      updatedFoo.put("bar", "bbb");
+      JSONArray updatedJsonArr = new JSONArray();
+      updatedJsonArr.put(updatedFoo);
+      ApiFuture<AppendRowsResponse> appendFuture4 = writer.append(updatedJsonArr);
+
+      assertEquals(3L, appendFuture4.get().getAppendResult().getOffset().getValue());
+      assertEquals(4, testBigQueryWrite.getAppendRequests().size());
+      assertEquals(
+          1,
+          testBigQueryWrite
+              .getAppendRequests()
+              .get(3)
+              .getProtoRows()
+              .getRows()
+              .getSerializedRowsCount());
+      assertEquals(
+          testBigQueryWrite
+              .getAppendRequests()
+              .get(3)
+              .getProtoRows()
+              .getRows()
+              .getSerializedRows(0),
+          UpdatedFooType.newBuilder().setFoo("aaa").setBar("bbb").build().toByteString());
+
+      assertTrue(testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema());
+      assertTrue(
+          testBigQueryWrite.getAppendRequests().get(2).getProtoRows().hasWriterSchema()
+              || testBigQueryWrite.getAppendRequests().get(3).getProtoRows().hasWriterSchema());
+
+      // Verify the map after the writer is refreshed
+      assertEquals(missingValueMap, writer.getMissingValueInterpretationMap());
+      assertEquals(
+          testBigQueryWrite.getAppendRequests().get(3).getDefaultMissingValueInterpretation(),
+          MissingValueInterpretation.DEFAULT_VALUE);
+      assertEquals(
+          testBigQueryWrite.getAppendRequests().get(3).getMissingValueInterpretations(),
+          missingValueMap);
+    }
+  }
+
+  @Test
+  void testWithoutIgnoreUnknownFieldsUpdateImmediateSuccess() throws Exception {
+    TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build();
+    TableSchema updatedSchema =
+        TableSchema.newBuilder()
+            .addFields(0, TEST_INT)
+            .addFields(
+                1,
+                TableFieldSchema.newBuilder()
+                    .setName("test_string")
+                    .setType(TableFieldSchema.Type.STRING)
+                    .setMode(Mode.NULLABLE))
+            .build();
+
+    // The first GetWriteStream call returns the original schema.
+    testBigQueryWrite.addResponse(
+        WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(tableSchema).build());
+    // A second GetWriteStream call returns the updated schema, fixing the writer to accept the
+    // new field.
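+    // The writer is expected to refresh its cached schema from this second response and retry
+    // the failed rows, which is what lets the append below succeed immediately.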
+ testBigQueryWrite.addResponse( + WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(updatedSchema).build()); + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + try (JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM).build()) { + JSONObject foo = new JSONObject(); + foo.put("test_int", 10); + JSONObject bar = new JSONObject(); + bar.put("test_string", "a"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + jsonArr.put(bar); + ApiFuture appendFuture = writer.append(jsonArr); + appendFuture.get(); + } + } + + @Test + void testWithoutIgnoreUnknownFieldsUpdateSecondSuccess() throws Exception { + TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); + TableSchema updatedSchema = + TableSchema.newBuilder() + .addFields(0, TEST_INT) + .addFields( + 1, + TableFieldSchema.newBuilder() + .setName("test_string") + .setType(TableFieldSchema.Type.STRING) + .setMode(Mode.NULLABLE)) + .build(); + // GetWriteStream is called once and got the updated schema + testBigQueryWrite.addResponse( + WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(TABLE_SCHEMA).build()); + testBigQueryWrite.addResponse( + WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(updatedSchema).build()); + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + try (JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM).build()) { + JSONObject foo = new JSONObject(); + foo.put("test_int", 10); + JSONObject bar = new JSONObject(); + bar.put("test_string", "a"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + jsonArr.put(bar); + ApiFuture appendFuture = writer.append(jsonArr); + appendFuture.get(); + } + } + + @Test + void testSchemaUpdateInMultiplexing_singleConnection() throws Exception { + // Set min connection count to be 1 to force sharing connection. + ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + // GetWriteStream is called twice and got the updated schema + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA_2) + .setLocation("us") + .build()); + // The following two writers have different stream name and schema, but will share the same + // connection . + JsonStreamWriter writer1 = + getTestJsonStreamWriterBuilder(TEST_STREAM) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + JsonStreamWriter writer2 = + getTestJsonStreamWriterBuilder(TEST_STREAM_2) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .setUpdatedSchema(UPDATED_TABLE_SCHEMA) + .setWriteStream(TEST_STREAM) + .build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + testBigQueryWrite.addResponse(createAppendResponse(2)); + testBigQueryWrite.addResponse(createAppendResponse(3)); + // Append request with old schema for writer 1. 
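+    // With the pool capped at one connection per region above, writer1 and writer2 multiplex
+    // over the same gRPC connection, so their requests below arrive interleaved in submission
+    // order against the scripted offsets.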
+ JSONObject foo = new JSONObject(); + foo.put("foo", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + // Append request with old schema for writer 2. + JSONObject baz = new JSONObject(); + baz.put("baz", "bbb"); + JSONArray jsonArr2 = new JSONArray(); + jsonArr2.put(baz); + + // Append request with new schema. + JSONObject updatedFoo = new JSONObject(); + updatedFoo.put("foo", "aaa"); + updatedFoo.put("bar", "bbb"); + JSONArray updatedJsonArr = new JSONArray(); + updatedJsonArr.put(updatedFoo); + + // This append will trigger new schema update. + ApiFuture appendFuture1 = writer1.append(jsonArr); + // This append be put onto the same connection as the first one. + ApiFuture appendFuture2 = writer2.append(jsonArr2); + + // Sleep for a small period of time to make sure the updated schema is stored. + Sleeper.DEFAULT.sleep(300); + // Back to writer1 here, we are expected to use the updated schema already. + // Both of the following append will be parsed correctly. + ApiFuture appendFuture3 = writer1.append(updatedJsonArr); + ApiFuture appendFuture4 = writer1.append(jsonArr); + + assertEquals(0L, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1L, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals(2L, appendFuture3.get().getAppendResult().getOffset().getValue()); + assertEquals(3L, appendFuture4.get().getAppendResult().getOffset().getValue()); + + // The 1st schema comes from writer1's initial schema + assertEquals( + testBigQueryWrite.getAppendRequests().get(0).getProtoRows().getWriterSchema(), + PROTO_SCHEMA); + // The 2nd schema comes from writer2's initial schema + assertEquals( + testBigQueryWrite.getAppendRequests().get(1).getProtoRows().getWriterSchema(), + PROTO_SCHEMA_2); + // The 3rd schema comes from writer1's updated schema + assertEquals( + testBigQueryWrite.getAppendRequests().get(2).getProtoRows().getWriterSchema(), + UPDATED_PROTO_SCHEMA); + // The 4th schema should be empty as schema update is already done for writer 1. + assertEquals( + testBigQueryWrite.getAppendRequests().get(3).getProtoRows().getWriterSchema(), + ProtoSchema.getDefaultInstance()); + writer1.close(); + writer2.close(); + } + + @Test + void testMissingValueInterpretation_multiplexingCase() throws Exception { + // Set min connection count to be 1 to force sharing connection. + ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + // The following two writers have different stream name and schema, but will share the same + // connection . 
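+    // Each writer keeps its own default MissingValueInterpretation; the assertions further down
+    // check that the shared connection does not leak one writer's setting into the other's
+    // requests.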
+ JsonStreamWriter writer1 = + getTestJsonStreamWriterBuilder(TEST_STREAM) + .setEnableConnectionPool(true) + .setLocation("us") + .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE) + .build(); + JsonStreamWriter writer2 = + getTestJsonStreamWriterBuilder(TEST_STREAM_2) + .setEnableConnectionPool(true) + .setLocation("us") + .setDefaultMissingValueInterpretation(MissingValueInterpretation.NULL_VALUE) + .build(); + + long appendCountPerStream = 5; + for (int i = 0; i < appendCountPerStream * 4; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + + JSONObject foo = new JSONObject(); + foo.put("foo", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + List> futures = new ArrayList<>(); + // In total insert append `appendCountPerStream` * 4 requests. + // We insert using the pattern of + // jsonStreamWriter1, jsonStreamWriter1, jsonStreamWriter2, jsonStreamWriter2 + for (int i = 0; i < appendCountPerStream; i++) { + ApiFuture appendFuture1 = writer1.append(jsonArr); + ApiFuture appendFuture2 = writer1.append(jsonArr); + ApiFuture appendFuture3 = writer2.append(jsonArr); + ApiFuture appendFuture4 = writer2.append(jsonArr); + appendFuture1.get(); + appendFuture2.get(); + appendFuture3.get(); + appendFuture4.get(); + } + + for (int i = 0; i < appendCountPerStream * 4; i++) { + AppendRowsRequest appendRowsRequest = testBigQueryWrite.getAppendRequests().get(i); + if (i % 4 <= 1) { + assertEquals( + appendRowsRequest.getDefaultMissingValueInterpretation(), + MissingValueInterpretation.DEFAULT_VALUE); + } else { + assertEquals( + appendRowsRequest.getDefaultMissingValueInterpretation(), + MissingValueInterpretation.NULL_VALUE); + } + } + + writer1.close(); + writer2.close(); + } + + @Test + void testSchemaUpdateInMultiplexing_multipleWriterForSameStreamName() throws Exception { + // Set min connection count to be 1 to force sharing connection. + ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + + // GetWriteStream is called twice and got the updated schema + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + testBigQueryWrite.addResponse( + WriteStream.newBuilder() + .setName(TEST_STREAM) + .setTableSchema(TABLE_SCHEMA) + .setLocation("us") + .build()); + // Create two writers writing to the same stream. + JsonStreamWriter writer1 = + getTestJsonStreamWriterBuilder(TEST_STREAM) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + JsonStreamWriter writer2 = + getTestJsonStreamWriterBuilder(TEST_STREAM) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + + // Trigger schema update in the second request. + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(1)).build()) + .setUpdatedSchema(UPDATED_TABLE_SCHEMA) + .setWriteStream(TEST_STREAM) + .build()); + testBigQueryWrite.addResponse(createAppendResponse(2)); + testBigQueryWrite.addResponse(createAppendResponse(3)); + // Append request with old schema. + JSONObject foo = new JSONObject(); + foo.put("foo", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + + // Append request with new schema. 
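+    // Because both writers target TEST_STREAM, a schema update surfaced on one writer's append
+    // is expected to become visible to the other once the response is processed (hence the
+    // short sleep below before using the new column).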
+    JSONObject updatedFoo = new JSONObject();
+    updatedFoo.put("foo", "aaa");
+    updatedFoo.put("bar", "bbb");
+    JSONArray updatedJsonArr = new JSONArray();
+    updatedJsonArr.put(updatedFoo);
+
+    // Normal append, nothing happens
+    ApiFuture<AppendRowsResponse> appendFuture1 = writer1.append(jsonArr);
+    // This append triggers the schema update
+    ApiFuture<AppendRowsResponse> appendFuture2 = writer2.append(jsonArr);
+
+    // Sleep for a small period of time to make sure the updated schema is stored.
+    Sleeper.DEFAULT.sleep(300);
+    // From now on everyone should be able to use the new schema.
+    ApiFuture<AppendRowsResponse> appendFuture3 = writer1.append(updatedJsonArr);
+    ApiFuture<AppendRowsResponse> appendFuture4 = writer2.append(updatedJsonArr);
+
+    assertEquals(0L, appendFuture1.get().getAppendResult().getOffset().getValue());
+    assertEquals(1L, appendFuture2.get().getAppendResult().getOffset().getValue());
+    assertEquals(2L, appendFuture3.get().getAppendResult().getOffset().getValue());
+    assertEquals(3L, appendFuture4.get().getAppendResult().getOffset().getValue());
+
+    // The 1st schema comes from writer1's initial schema
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(0).getProtoRows().getWriterSchema(),
+        PROTO_SCHEMA);
+    // The 2nd append triggers no schema change.
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(1).getProtoRows().getWriterSchema(),
+        ProtoSchema.getDefaultInstance());
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(2).getProtoRows().getWriterSchema(),
+        UPDATED_PROTO_SCHEMA);
+    // The next request after the schema update goes back to an empty schema.
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(3).getProtoRows().getWriterSchema(),
+        ProtoSchema.getDefaultInstance());
+    writer1.close();
+    writer2.close();
+  }
+
+  @Test
+  void testSchemaUpdateInMultiplexing_IgnoreUpdateIfTimeStampNewer() throws Exception {
+    // Set min connection count to be 1 to force sharing connection.
+    ConnectionWorkerPool.setOptions(
+        Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build());
+    testBigQueryWrite.addResponse(
+        WriteStream.newBuilder()
+            .setName(TEST_STREAM)
+            .setTableSchema(TABLE_SCHEMA)
+            .setLocation("us")
+            .build());
+    // The first writer shares the pooled connection; a second writer is created later in the
+    // test.
+    JsonStreamWriter writer1 =
+        getTestJsonStreamWriterBuilder(TEST_STREAM)
+            .setEnableConnectionPool(true)
+            .setLocation("us")
+            .build();
+
+    testBigQueryWrite.addResponse(
+        AppendRowsResponse.newBuilder()
+            .setAppendResult(
+                AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build())
+            .setUpdatedSchema(UPDATED_TABLE_SCHEMA)
+            .setWriteStream(TEST_STREAM)
+            .build());
+    testBigQueryWrite.addResponse(createAppendResponse(1));
+    testBigQueryWrite.addResponse(createAppendResponse(2));
+    testBigQueryWrite.addResponse(createAppendResponse(3));
+    // Append request with old schema for writer 1.
+    JSONObject foo = new JSONObject();
+    foo.put("foo", "aaa");
+    JSONArray jsonArr = new JSONArray();
+    jsonArr.put(foo);
+
+    // Append request with old schema for writer 2.
+    JSONObject baz = new JSONObject();
+    baz.put("baz", "bbb");
+    JSONArray jsonArr2 = new JSONArray();
+    jsonArr2.put(baz);
+
+    // Append request with new schema.
+    JSONObject updatedFoo = new JSONObject();
+    updatedFoo.put("foo", "aaa");
+    updatedFoo.put("bar", "bbb");
+    JSONArray updatedJsonArr = new JSONArray();
+    updatedJsonArr.put(updatedFoo);
+
+    // This append will trigger the new schema update.
+    ApiFuture<AppendRowsResponse> appendFuture1 = writer1.append(jsonArr);
+    // Sleep for a small period of time to make sure the updated schema is stored.
+    Sleeper.DEFAULT.sleep(300);
+    // Write to writer 1 again; the new schema should be used.
+    // The following two appends will succeed.
+    ApiFuture<AppendRowsResponse> appendFuture2 = writer1.append(updatedJsonArr);
+    ApiFuture<AppendRowsResponse> appendFuture3 = writer1.append(jsonArr);
+
+    // Second phase of the test: create another writer.
+    // Expect the append to go through without using the updated schema.
+    JsonStreamWriter writer2 =
+        getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA_2)
+            .setEnableConnectionPool(true)
+            .setLocation("us")
+            .build();
+    ApiFuture<AppendRowsResponse> appendFuture4 = writer2.append(jsonArr2);
+
+    assertEquals(0L, appendFuture1.get().getAppendResult().getOffset().getValue());
+    assertEquals(1L, appendFuture2.get().getAppendResult().getOffset().getValue());
+    assertEquals(2L, appendFuture3.get().getAppendResult().getOffset().getValue());
+    assertEquals(3L, appendFuture4.get().getAppendResult().getOffset().getValue());
+
+    // The 1st schema comes from writer1's initial schema
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(0).getProtoRows().getWriterSchema(),
+        PROTO_SCHEMA);
+    // The 2nd schema comes from the updated schema
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(1).getProtoRows().getWriterSchema(),
+        UPDATED_PROTO_SCHEMA);
+    // No new schema.
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(2).getProtoRows().getWriterSchema(),
+        ProtoSchema.getDefaultInstance());
+    // The 4th schema comes from writer2's explicitly provided schema.
+    assertEquals(
+        testBigQueryWrite.getAppendRequests().get(3).getProtoRows().getWriterSchema(),
+        PROTO_SCHEMA_2);
+    writer1.close();
+    writer2.close();
+  }
+
+  @Test
+  void testWithoutIgnoreUnknownFieldsUpdateFail() throws Exception {
+    TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build();
+    // GetWriteStream is called but fails to return the updated schema.
+    testBigQueryWrite.addResponse(
+        WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(tableSchema).build());
+    testBigQueryWrite.addResponse(
+        WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(tableSchema).build());
+    try (JsonStreamWriter writer =
+        getTestJsonStreamWriterBuilder(TEST_STREAM, tableSchema).build()) {
+      JSONObject foo = new JSONObject();
+      foo.put("test_int", 10);
+      JSONObject bar = new JSONObject();
+      bar.put("test_unknown", 10);
+      JSONArray jsonArr = new JSONArray();
+      jsonArr.put(foo);
+      jsonArr.put(bar);
+      AppendSerializationError ex =
+          assertThrows(AppendSerializationError.class, () -> writer.append(jsonArr));
+      assertEquals(
+          "The source object has fields unknown to BigQuery: root.test_unknown.",
+          ex.getRowIndexToErrorMessage().get(1));
+      assertEquals(TEST_STREAM, ex.getStreamName());
+    }
+  }
+
+  @Test
+  void testWithIgnoreUnknownFields() throws Exception {
+    TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build();
+    try (JsonStreamWriter writer =
+        JsonStreamWriter.newBuilder(TEST_STREAM, tableSchema)
+            .setChannelProvider(channelProvider)
+            .setIgnoreUnknownFields(true)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build())
+            .build()) {
+      testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().build());
+      JSONObject foo = new JSONObject();
+      foo.put("test_int", 10);
+      JSONObject bar = new JSONObject();
+      bar.put("test_unknown", 10);
+      JSONArray jsonArr = new JSONArray();
+      jsonArr.put(foo);
+      jsonArr.put(bar);
+      ApiFuture<AppendRowsResponse> appendFuture = writer.append(jsonArr);
+      appendFuture.get();
+    }
+  }
+
+  @Test
+  void testFlowControlSetting() throws Exception {
+    TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build();
+    try (JsonStreamWriter writer =
+        JsonStreamWriter.newBuilder(TEST_STREAM, tableSchema)
+            .setChannelProvider(channelProvider)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build())
+            .setFlowControlSettings(
+                FlowControlSettings.newBuilder()
+                    .setLimitExceededBehavior(FlowController.LimitExceededBehavior.ThrowException)
+                    .setMaxOutstandingRequestBytes(1L)
+                    .build())
+            .build()) {
+      JSONObject foo = new JSONObject();
+      foo.put("test_int", 10);
+      JSONArray jsonArr = new JSONArray();
+      jsonArr.put(foo);
+      StatusRuntimeException ex =
+          assertThrows(
+              StatusRuntimeException.class,
+              () -> {
+                writer.append(jsonArr);
+              });
+      assertEquals(ex.getStatus().getCode(), Status.RESOURCE_EXHAUSTED.getCode());
+      assertTrue(
+          ex.getStatus()
+              .getDescription()
+              .contains(
+                  "Exceeds client side inflight buffer, consider add more buffer or open more"
+                      + " connections"));
+    }
+  }
+
+  // This is to test that the new addition didn't break previous settings, i.e., setting the
+  // inflight limit without a limit behavior.
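+  // (With no LimitExceededBehavior set, the append below is expected to go through even though
+  // MaxOutstandingRequestBytes is 1, rather than throwing as in the previous test.)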
+ @Test
+ void testFlowControlSettingNoLimitBehavior() throws Exception {
+ TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build();
+ try (JsonStreamWriter writer =
+ JsonStreamWriter.newBuilder(TEST_STREAM, tableSchema)
+ .setChannelProvider(channelProvider)
+ .setCredentialsProvider(NoCredentialsProvider.create())
+ .setExecutorProvider(InstantiatingExecutorProvider.newBuilder().build())
+ .setFlowControlSettings(
+ FlowControlSettings.newBuilder().setMaxOutstandingRequestBytes(1L).build())
+ .build()) {
+ testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().build());
+ JSONObject foo = new JSONObject();
+ foo.put("test_int", 10);
+ JSONArray jsonArr = new JSONArray();
+ jsonArr.put(foo);
+ ApiFuture<AppendRowsResponse> appendFuture = writer.append(jsonArr);
+ appendFuture.get();
+ }
+ }
+
+ @Test
+ void testMultipleAppendSerializationErrors()
+ throws DescriptorValidationException, IOException, InterruptedException {
+ FooType expectedProto = FooType.newBuilder().setFoo("allen").build();
+ JSONObject foo = new JSONObject();
+ // put a field which is not part of the expected schema
+ foo.put("not_foo", "allen");
+ JSONObject foo1 = new JSONObject();
+ // put a valid value into the field
+ foo1.put("foo", "allen");
+ JSONObject foo2 = new JSONObject();
+ // put a field which is not part of the expected schema
+ foo2.put("not_bar", "woody");
+ JSONArray jsonArr = new JSONArray();
+ jsonArr.put(foo);
+ jsonArr.put(foo1);
+ jsonArr.put(foo2);
+ testBigQueryWrite.addResponse(
+ WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(TABLE_SCHEMA).build());
+ testBigQueryWrite.addResponse(
+ WriteStream.newBuilder().setName(TEST_STREAM).setTableSchema(TABLE_SCHEMA).build());
+
+ try (JsonStreamWriter writer =
+ getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) {
+ AppendSerializationError appendSerializationError =
+ assertThrows(AppendSerializationError.class, () -> writer.append(jsonArr));
+ Map<Integer, String> rowIndexToErrorMessage =
+ appendSerializationError.getRowIndexToErrorMessage();
+ assertEquals(
+ ImmutableMap.of(
+ 0, "The source object has fields unknown to BigQuery: root.not_foo.",
+ 2, "The source object has fields unknown to BigQuery: root.not_bar."),
+ rowIndexToErrorMessage);
+ }
+ }
+
+ @Test
+ void testBadStringToNumericRowError()
+ throws DescriptorValidationException, IOException, InterruptedException {
+ TableSchema TABLE_SCHEMA =
+ TableSchema.newBuilder()
+ .addFields(
+ 0,
+ TableFieldSchema.newBuilder()
+ .setName("test_field_type")
+ .setType(TableFieldSchema.Type.NUMERIC)
+ .setMode(TableFieldSchema.Mode.NULLABLE)
+ .build())
+ .build();
+ SchemaTest.StringType expectedProto =
+ SchemaTest.StringType.newBuilder().setTestFieldType("allen").build();
+ JSONObject foo = new JSONObject();
+ // put a string value that cannot be converted to NUMERIC
+ foo.put("test_field_type", "allen");
+ JSONArray jsonArr = new JSONArray();
+ jsonArr.put(foo);
+
+ try (JsonStreamWriter writer =
+ getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) {
+ AppendSerializationError appendSerializationError =
+ assertThrows(AppendSerializationError.class, () -> writer.append(jsonArr));
+ Map<Integer, String> rowIndexToErrorMessage =
+ appendSerializationError.getRowIndexToErrorMessage();
+ assertEquals(1, rowIndexToErrorMessage.size());
+ assertTrue(
+ rowIndexToErrorMessage
+ .get(0)
+ .startsWith("Field root.test_field_type failed to convert to NUMERIC. 
Error:")); + } + } + + @Test + void testWriterId() throws DescriptorValidationException, IOException, InterruptedException { + JsonStreamWriter writer1 = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); + assertFalse(writer1.getWriterId().isEmpty()); + JsonStreamWriter writer2 = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); + assertFalse(writer2.getWriterId().isEmpty()); + assertNotEquals(writer1.getWriterId(), writer2.getWriterId()); + } + + @Test + void testIsDone() throws DescriptorValidationException, IOException, InterruptedException { + JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); + assertFalse(writer.isClosed()); + writer.close(); + assertTrue(writer.isClosed()); + assertTrue(writer.isUserClosed()); + } + + private AppendRowsResponse createAppendResponse(long offset) { + return AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(offset)).build()) + .build(); + } + + @Test + void testAppendWithMissingValueMap() throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test-列") + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(0, field).build(); + FlexibleType expectedProto = FlexibleType.newBuilder().setColDGVzdC3LiJc("allen").build(); + JSONObject flexible = new JSONObject(); + flexible.put("test-列", "allen"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(flexible); + + Map missingValueMap = new HashMap<>(); + missingValueMap.put("col1", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE); + missingValueMap.put("col3", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE); + + try (JsonStreamWriter writer = + getTestJsonStreamWriterBuilder(TEST_STREAM, tableSchema) + .setMissingValueInterpretationMap(missingValueMap) + .setTraceId("test:empty") + .build()) { + + assertEquals(missingValueMap, writer.getMissingValueInterpretationMap()); + + testBigQueryWrite.addResponse( + AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) + .build()); + + ApiFuture appendFuture = writer.append(jsonArr); + assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); + appendFuture.get(); + assertEquals( + testBigQueryWrite.getAppendRequests().get(0).getMissingValueInterpretations(), + missingValueMap); + } + } + + @Test + void testWrongCompressionType() throws Exception { + IllegalArgumentException ex = + assertThrows( + IllegalArgumentException.class, + () -> { + getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) + .setCompressorName("not-gzip") + .build(); + }); + assertTrue( + ex.getMessage() + .contains( + "Compression of type \"not-gzip\" isn't supported, only \"gzip\" compression is" + + " supported.")); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java new file mode 100644 index 000000000000..8633975cc4fd --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java @@ -0,0 +1,2041 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.cloud.bigquery.storage.test.JsonTest.*; +import com.google.cloud.bigquery.storage.test.SchemaTest.*; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Message; +import com.google.protobuf.Timestamp; +import java.math.BigDecimal; +import java.time.Instant; +import java.time.LocalTime; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.jupiter.api.Test; + +class JsonToProtoMessageTest { + private static final Logger LOG = Logger.getLogger(JsonToProtoMessageTest.class.getName()); + private static ImmutableMap AllTypesToDebugMessageTest = + new ImmutableMap.Builder() + .put(BoolType.getDescriptor(), "boolean") + .put(BytesType.getDescriptor(), "bytes") + .put(Int64Type.getDescriptor(), "int64") + .put(Int32Type.getDescriptor(), "int32") + .put(DoubleType.getDescriptor(), "double") + .put(StringType.getDescriptor(), "string") + .put(RepeatedType.getDescriptor(), "array") + .put(ObjectType.getDescriptor(), "object") + .build(); + + private static ImmutableMap AllTypesToCorrectProto = + new ImmutableMap.Builder() + .put( + BoolType.getDescriptor(), + new Message[] {BoolType.newBuilder().setTestFieldType(true).build()}) + .put( + BytesType.getDescriptor(), + new Message[] { + BytesType.newBuilder().setTestFieldType(ByteString.copyFromUtf8("test")).build(), + BytesType.newBuilder() + .setTestFieldType(ByteString.copyFrom(new byte[] {1, 2, 3})) + .build() + }) + .put( + Int64Type.getDescriptor(), + new Message[] { + Int64Type.newBuilder().setTestFieldType(Long.MAX_VALUE).build(), + Int64Type.newBuilder().setTestFieldType(new Long(Integer.MAX_VALUE)).build() + }) + .put( + Int32Type.getDescriptor(), + new Message[] {Int32Type.newBuilder().setTestFieldType(Integer.MAX_VALUE).build()}) + .put( + DoubleType.getDescriptor(), + new Message[] { + DoubleType.newBuilder().setTestFieldType(Long.MAX_VALUE).build(), + DoubleType.newBuilder().setTestFieldType(Integer.MAX_VALUE).build(), + DoubleType.newBuilder().setTestFieldType(1.23).build() + }) + .put( + StringType.getDescriptor(), + new Message[] { + StringType.newBuilder().setTestFieldType("9223372036854775807").build(), + StringType.newBuilder().setTestFieldType("2147483647").build(), + StringType.newBuilder().setTestFieldType("true").build(), 
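+ // String fields accept stringified coercions: the long, int, and boolean entries
+ // of simpleJSONObjects convert alongside the plain "test" string, which is why
+ // four expected protos are listed for StringType.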
+ StringType.newBuilder().setTestFieldType("test").build() + }) + .put( + RepeatedType.getDescriptor(), + new Message[] { + RepeatedType.newBuilder() + .addAllTestFieldType( + new ArrayList() { + { + add(1L); + add(2L); + add(3L); + } + }) + .build() + }) + .put( + ObjectType.getDescriptor(), + new Message[] { + ObjectType.newBuilder() + .setTestFieldType(ComplexLvl2.newBuilder().setTestInt(1).build()) + .build() + }) + .build(); + + private static ImmutableMap AllRepeatedTypesToDebugMessageTest = + new ImmutableMap.Builder() + .put(RepeatedBool.getDescriptor(), "boolean") + .put(RepeatedBytes.getDescriptor(), "bytes") + .put(RepeatedInt64.getDescriptor(), "int64") + .put(RepeatedInt32.getDescriptor(), "int32") + .put(RepeatedDouble.getDescriptor(), "double") + .put(RepeatedString.getDescriptor(), "string") + .put(RepeatedObject.getDescriptor(), "object") + .build(); + + private static ImmutableMap AllRepeatedTypesToCorrectProto = + new ImmutableMap.Builder() + .put( + RepeatedBool.getDescriptor(), + new Message[] { + RepeatedBool.newBuilder().addTestRepeated(true).addTestRepeated(false).build() + }) + .put( + RepeatedBytes.getDescriptor(), + new Message[] { + RepeatedBytes.newBuilder() + .addTestRepeated(ByteString.copyFrom(new byte[] {0})) + .addTestRepeated(ByteString.copyFrom(new byte[] {0, -116, -122, 71})) + .build(), + RepeatedBytes.newBuilder() + .addTestRepeated( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0"))) + .addTestRepeated( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("1.2"))) + .build() + }) + .put( + RepeatedString.getDescriptor(), + new Message[] { + RepeatedString.newBuilder().addTestRepeated("9223372036854775807").build(), + RepeatedString.newBuilder().addTestRepeated("2147483647").build(), + RepeatedString.newBuilder().addTestRepeated("true").build(), + RepeatedString.newBuilder().addTestRepeated("hello").addTestRepeated("test").build() + }) + .put( + RepeatedInt64.getDescriptor(), + new Message[] { + RepeatedInt64.newBuilder() + .addTestRepeated(Long.MAX_VALUE) + .addTestRepeated(Long.MIN_VALUE) + .addTestRepeated(Integer.MAX_VALUE) + .addTestRepeated(Integer.MIN_VALUE) + .addTestRepeated(Short.MAX_VALUE) + .addTestRepeated(Short.MIN_VALUE) + .addTestRepeated(Byte.MAX_VALUE) + .addTestRepeated(Byte.MIN_VALUE) + .addTestRepeated(0) + .build(), + RepeatedInt64.newBuilder() + .addTestRepeated(Integer.MAX_VALUE) + .addTestRepeated(Integer.MIN_VALUE) + .addTestRepeated(Short.MAX_VALUE) + .addTestRepeated(Short.MIN_VALUE) + .addTestRepeated(Byte.MAX_VALUE) + .addTestRepeated(Byte.MIN_VALUE) + .addTestRepeated(0) + .build() + }) + .put( + RepeatedInt32.getDescriptor(), + new Message[] { + RepeatedInt32.newBuilder() + .addTestRepeated(Integer.MAX_VALUE) + .addTestRepeated(Integer.MIN_VALUE) + .addTestRepeated(Short.MAX_VALUE) + .addTestRepeated(Short.MIN_VALUE) + .addTestRepeated(Byte.MAX_VALUE) + .addTestRepeated(Byte.MIN_VALUE) + .addTestRepeated(0) + .build() + }) + .put( + RepeatedDouble.getDescriptor(), + new Message[] { + RepeatedDouble.newBuilder() + .addTestRepeated(Long.MAX_VALUE) + .addTestRepeated(Long.MIN_VALUE) + .addTestRepeated(Integer.MAX_VALUE) + .addTestRepeated(Integer.MIN_VALUE) + .addTestRepeated(Short.MAX_VALUE) + .addTestRepeated(Short.MIN_VALUE) + .addTestRepeated(Byte.MAX_VALUE) + .addTestRepeated(Byte.MIN_VALUE) + .addTestRepeated(0) + .build(), + RepeatedDouble.newBuilder() + .addTestRepeated(Integer.MAX_VALUE) + .addTestRepeated(Integer.MIN_VALUE) + .addTestRepeated(Short.MAX_VALUE) + 
.addTestRepeated(Short.MIN_VALUE) + .addTestRepeated(Byte.MAX_VALUE) + .addTestRepeated(Byte.MIN_VALUE) + .addTestRepeated(0) + .build(), + RepeatedDouble.newBuilder() + .addTestRepeated(Double.MAX_VALUE) + .addTestRepeated(Double.MIN_VALUE) + .addTestRepeated(Float.MAX_VALUE) + .addTestRepeated(Float.MIN_VALUE) + .build(), + RepeatedDouble.newBuilder() + .addTestRepeated(Float.MAX_VALUE) + .addTestRepeated(Float.MIN_VALUE) + .build() + }) + .put( + RepeatedObject.getDescriptor(), + new Message[] { + RepeatedObject.newBuilder() + .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(1).build()) + .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(2).build()) + .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(3).build()) + .build() + }) + .build(); + + private static JSONObject[] simpleJSONObjects = { + new JSONObject().put("test_field_type", Long.MAX_VALUE), + new JSONObject().put("test_field_type", Integer.MAX_VALUE), + new JSONObject().put("test_field_type", 1.23), + new JSONObject().put("test_field_type", true), + new JSONObject().put("test_field_type", ByteString.copyFromUtf8("test")), + new JSONObject().put("test_field_type", new JSONArray("[1, 2, 3]")), + new JSONObject().put("test_field_type", new JSONObject().put("test_int", 1)), + new JSONObject().put("test_field_type", "test") + }; + + private static JSONObject[] simpleJSONArrays = { + new JSONObject() + .put( + "test_repeated", + new JSONArray( + new Long[] { + Long.MAX_VALUE, + Long.MIN_VALUE, + (long) Integer.MAX_VALUE, + (long) Integer.MIN_VALUE, + (long) Short.MAX_VALUE, + (long) Short.MIN_VALUE, + (long) Byte.MAX_VALUE, + (long) Byte.MIN_VALUE, + 0L + })), + new JSONObject() + .put( + "test_repeated", + new JSONArray( + new Integer[] { + Integer.MAX_VALUE, + Integer.MIN_VALUE, + (int) Short.MAX_VALUE, + (int) Short.MIN_VALUE, + (int) Byte.MAX_VALUE, + (int) Byte.MIN_VALUE, + 0 + })), + new JSONObject() + .put( + "test_repeated", + new JSONArray( + new Double[] { + Double.MAX_VALUE, + Double.MIN_VALUE, + (double) Float.MAX_VALUE, + (double) Float.MIN_VALUE + })), + new JSONObject() + .put("test_repeated", new JSONArray(new Float[] {Float.MAX_VALUE, Float.MIN_VALUE})), + new JSONObject().put("test_repeated", new JSONArray(new Boolean[] {true, false})), + new JSONObject().put("test_repeated", new JSONArray(new String[] {"hello", "test"})), + new JSONObject() + .put( + "test_repeated", + new JSONArray( + new byte[][] { + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.2")) + .toByteArray() + })), + new JSONObject().put("test_repeated", new JSONArray(new int[][] {{11111, 22222}})), + new JSONObject().put("test_repeated", new JSONArray(new char[][] {{'a', 'b'}, {'c'}})), + new JSONObject().put("test_repeated", new JSONArray(new String[][] {{"hello"}, {"test"}})), + new JSONObject() + .put( + "test_repeated", + new JSONArray( + new JSONObject[] { + new JSONObject().put("test_int", 1), + new JSONObject().put("test_int", 2), + new JSONObject().put("test_int", 3) + })) + }; + private final TableFieldSchema TEST_INT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INT64) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_int") + .build(); + private final TableFieldSchema TEST_STRING = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_string") + .build(); + private final TableFieldSchema TEST_BYTES = + 
TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BYTES) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("test_bytes") + .build(); + private final TableFieldSchema TEST_BOOL = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BOOL) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bool") + .build(); + private final TableFieldSchema TEST_DOUBLE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DOUBLE) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_double") + .build(); + private final TableFieldSchema TEST_DATE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.REQUIRED) + .setName("test_date") + .build(); + private final TableFieldSchema TEST_DATETIME = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_datetime") + .build(); + private final TableFieldSchema TEST_DATETIME_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_datetime_str") + .build(); + private final TableFieldSchema COMPLEXLVL2 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.REQUIRED) + .addFields(0, TEST_INT) + .setName("complex_lvl2") + .build(); + private final TableFieldSchema COMPLEXLVL1 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRUCT) + .setMode(TableFieldSchema.Mode.REQUIRED) + .addFields(0, TEST_INT) + .addFields(1, COMPLEXLVL2) + .setName("complex_lvl1") + .build(); + private final TableFieldSchema TEST_NUMERIC = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric") + .build(); + private final TableFieldSchema TEST_NUMERIC_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_numeric_repeated") + .build(); + private final TableFieldSchema TEST_GEO = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.GEOGRAPHY) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_geo") + .build(); + private final TableFieldSchema TEST_TIMESTAMP = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_timestamp") + .build(); + private final TableFieldSchema TEST_TIMESTAMP_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_repeated") + .build(); + private final TableFieldSchema TEST_TIME = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_time") + .build(); + private final TableFieldSchema TEST_TIME_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_time_str") + .build(); + private final TableFieldSchema TEST_NUMERIC_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_str") + .build(); + private final TableFieldSchema TEST_NUMERIC_SHORT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_short") + .build(); + private final 
TableFieldSchema TEST_NUMERIC_INT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_int") + .build(); + private final TableFieldSchema TEST_NUMERIC_LONG = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_long") + .build(); + private final TableFieldSchema TEST_NUMERIC_FLOAT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_float") + .build(); + private final TableFieldSchema TEST_NUMERIC_DOUBLE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_double") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC_STR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_bignumeric_str") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC_SHORT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_short") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC_INT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_int") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC_LONG = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_long") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC_FLOAT = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_float") + .build(); + private final TableFieldSchema TEST_BIGNUMERIC_DOUBLE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_bignumeric_double") + .build(); + final TableFieldSchema TEST_INTERVAL = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.INTERVAL) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_interval") + .build(); + final TableFieldSchema TEST_JSON = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.JSON) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_json") + .build(); + final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_timestamp_higher_precision") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); + private final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_higher_precision_repeated") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); + private final TableSchema COMPLEX_TABLE_SCHEMA = + TableSchema.newBuilder() + .addFields(0, TEST_INT) + .addFields(1, 
TEST_STRING) + .addFields(2, TEST_BYTES) + .addFields(3, TEST_BOOL) + .addFields(4, TEST_DOUBLE) + .addFields(5, TEST_DATE) + .addFields(6, TEST_DATETIME) + .addFields(7, TEST_DATETIME_STR) + .addFields(8, COMPLEXLVL1) + .addFields(9, COMPLEXLVL2) + .addFields(10, TEST_NUMERIC) + .addFields(11, TEST_GEO) + .addFields(12, TEST_TIMESTAMP) + .addFields(13, TEST_TIME) + .addFields(14, TEST_TIME_STR) + .addFields(15, TEST_NUMERIC_REPEATED) + .addFields(16, TEST_NUMERIC_STR) + .addFields(17, TEST_NUMERIC_SHORT) + .addFields(18, TEST_NUMERIC_INT) + .addFields(19, TEST_NUMERIC_LONG) + .addFields(20, TEST_NUMERIC_FLOAT) + .addFields(21, TEST_NUMERIC_DOUBLE) + .addFields(22, TEST_BIGNUMERIC) + .addFields(23, TEST_BIGNUMERIC_STR) + .addFields(24, TEST_BIGNUMERIC_SHORT) + .addFields(25, TEST_BIGNUMERIC_INT) + .addFields(26, TEST_BIGNUMERIC_LONG) + .addFields(27, TEST_BIGNUMERIC_FLOAT) + .addFields(28, TEST_BIGNUMERIC_DOUBLE) + .addFields(29, TEST_INTERVAL) + .addFields(30, TEST_JSON) + .addFields(31, TEST_TIMESTAMP_HIGHER_PRECISION) + .addFields(32, TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .build(); + + @Test + void testDifferentNameCasing() throws Exception { + TestInt64 expectedProto = + TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).build(); + + JSONObject json = new JSONObject(); + json.put("bYtE", (byte) 1); + json.put("SHORT", (short) 1); + json.put("inT", 1); + json.put("lONg", 1L); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testBool() throws Exception { + TestBool expectedProto = + TestBool.newBuilder().setBool(true).setUppercase(true).setLowercase(false).build(); + JSONObject json = new JSONObject(); + json.put("bool", true); + json.put("uppercase", "TRUE"); + json.put("lowercase", "false"); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestBool.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testInt64() throws Exception { + TestInt64 expectedProto = + TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).setString(1).build(); + JSONObject json = new JSONObject(); + json.put("byte", (byte) 1); // This does NOT actually verify byte as it is converted to int + json.put("short", (short) 1); // This does NOT actually verify short as it is converted to int + json.put("int", 1); + json.put("long", 1L); + json.put("string", "1"); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testInt64Extended() throws Exception { + TestInt64 expectedProto = + TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).setString(1).build(); + Map map = new HashMap(); + map.put("byte", (byte) 1); + map.put("short", (short) 1); + map.put("int", (int) 1); + map.put("long", (long) 1); + map.put("string", "1"); + JSONObject json = new JSONObject(map); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testInt64Repeated() throws Exception { + RepeatedInt64 expectedProto = + RepeatedInt64.newBuilder() + .addTestRepeated(1) + .addTestRepeated(1) + .addTestRepeated(1) + .addTestRepeated(1) + .addTestRepeated(1) + .build(); + Collection collection = new ArrayList(); + collection.add((byte) 1); + collection.add((short) 1); + 
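+ // The remaining entries (int, long, and a numeric string) coerce to int64 as well.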
collection.add((int) 1); + collection.add((long) 1); + collection.add("1"); + JSONArray array = new JSONArray(collection); + JSONObject json = new JSONObject(); + json.put("test_repeated", array); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testInt32() throws Exception { + TestInt32 expectedProto = + TestInt32.newBuilder().setByte(1).setShort(1).setInt(1).setString(1).build(); + JSONObject json = new JSONObject(); + json.put("byte", (byte) 1); + json.put("short", (short) 1); + json.put("int", 1); + json.put("string", 1); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt32.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testInt32NotMatchInt64() throws Exception { + JSONObject json = new JSONObject(); + json.put("byte", (byte) 1); + json.put("short", (short) 1); + json.put("int", 1L); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt32.getDescriptor(), json)); + assertTrue(e.getMessage().contains("JSONObject does not have a int32 field at root.int.")); + } + + @Test + void testDateTimeMismatch() throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("datetime") + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + JSONObject json = new JSONObject(); + json.put("datetime", 1.0); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestDatetime.getDescriptor(), tableSchema, json)); + assertTrue(e.getMessage().contains("JSONObject does not have a int64 field at root.datetime.")); + } + + private void dateTimeMatch_Internal(String jsonVal, Long expectedVal) throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("datetime") + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + TestDatetime expectedProto = TestDatetime.newBuilder().setDatetime(expectedVal).build(); + JSONObject json = new JSONObject(); + json.put("datetime", jsonVal); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestDatetime.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDateTimeMatch() throws Exception { + dateTimeMatch_Internal("2021-09-27T20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27t20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27 20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-9-27T20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27T00:00:00", 142258525253402624L); + dateTimeMatch_Internal("2021-09-27T00:0:00", 142258525253402624L); + dateTimeMatch_Internal("2021-09-27", 142258525253402624L); + } + + @Test + void testTimeMismatch() throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("time") + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.REPEATED) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + JSONObject json = new JSONObject(); 
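+ // TIME values must arrive as packed int64 civil times (see the CivilTimeEncoder
+ // usage in testStructComplex) or as parseable strings; a raw double is rejected,
+ // and the error path is indexed per element because the field mode is REPEATED.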
+ json.put("time", new JSONArray(new Double[] {1.0})); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestTime.getDescriptor(), tableSchema, json)); + assertTrue(e.getMessage().contains("JSONObject does not have a int64 field at root.time[0].")); + } + + @Test + void testMixedCaseFieldNames() throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("fooBar") + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + + JSONObject json = new JSONObject(); + json.put("fooBar", "hello"); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestMixedCaseFieldNames.getDescriptor(), tableSchema, json); + } + + @Test + void testDouble() throws Exception { + TestDouble expectedProto = + TestDouble.newBuilder() + .setDouble(1.2) + .setFloat(3.4f) + .setByte(5) + .setShort(6) + .setInt(7) + .setLong(8) + .setString(9.1) + .build(); + JSONObject json = new JSONObject(); + json.put("double", 1.2); + json.put("float", 3.4f); + json.put("byte", new Byte((byte) 5)); + json.put("short", new Short((short) 6)); + json.put("int", 7); + json.put("long", 8L); + json.put("string", "9.1"); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestDouble.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDoubleHighPrecision() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder() + .setName("numeric") + .setType(TableFieldSchema.Type.NUMERIC) + .build()) + .build(); + TestNumeric expectedProto = + TestNumeric.newBuilder() + .setNumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("3.400500513"))) + .build(); + JSONObject json = new JSONObject(); + json.put("numeric", 3.400500512978076); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDoubleHighPrecision_RepeatedField() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("bignumeric") + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .build()) + .build(); + TestBignumeric expectedProto = + TestBignumeric.newBuilder() + .addBignumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("3.400500513"))) + .addBignumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0.1"))) + .addBignumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0.12"))) + .build(); + JSONObject json = new JSONObject(); + json.put("bignumeric", ImmutableList.of(3.400500512978076, 0.10000000000055, 0.12)); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestBignumeric.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testTimestamp() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_string").build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_string_T_Z").build()) + 
.addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_long").build()) + .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_int").build()) + .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_float").build()) + .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_offset").build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_zero_offset").build()) + .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_timezone").build()) + .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_saformat").build()) + .build(); + TestTimestamp expectedProto = + TestTimestamp.newBuilder() + .setTestString(10L) + .setTestStringTZ(1648493279010000L) + .setTestLong(1687984085000000L) + .setTestInt(153480695L) + .setTestFloat(153468069500L) + .setTestOffset(1649135171000000L) + .setTestZeroOffset(1648493279010000L) + .setTestTimezone(1649174771000000L) + .setTestSaformat(1534680660000000L) + .build(); + JSONObject json = new JSONObject(); + json.put("test_string", "1970-01-01 00:00:00.000010"); + json.put("test_string_T_Z", "2022-03-28T18:47:59.01Z"); + json.put("test_long", 1687984085000000L); + json.put("test_int", 153480695); + json.put("test_float", "1.534680695e11"); + json.put("test_offset", "2022-04-05T09:06:11+04:00"); + json.put("test_zero_offset", "2022-03-28T18:47:59.01+00:00"); + json.put("test_timezone", "2022-04-05 09:06:11 PST"); + json.put("test_saformat", "2018/08/19 12:11"); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestTimestamp.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testTimestamp_higherPrecision() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_string") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_string_T_Z") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_long") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_int") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_float") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_offset") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_zero_offset") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_timezone") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_saformat") + .build()) + .build(); + + TestTimestampHigherPrecision expectedProto = + TestTimestampHigherPrecision.newBuilder() + .setTestString("1970-01-01T00:00:00.000010+00:00") + .setTestStringTZ("2022-03-28T18:47:59.010000+00:00") + .setTestLong("2023-06-28T20:28:05.000000+00:00") + .setTestInt("1970-01-01T00:02:33.480695+00:00") + .setTestFloat("1970-01-02T18:37:48.069500+00:00") + .setTestOffset("2022-04-05T05:06:11.000000+00:00") + .setTestZeroOffset("2022-03-28T18:47:59.010000+00:00") + .setTestTimezone("2022-04-05T16:06:11.000000+00:00") + .setTestSaformat("2018-08-19T12:11:00.000000+00:00") + .build(); + JSONObject json = new JSONObject(); + json.put("test_string", "1970-01-01 00:00:00.000010"); + 
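+ // Same nine inputs as testTimestamp above; with setTimestampPrecision(12) the
+ // expected proto carries canonicalized timestamp strings ("...+00:00") instead of
+ // epoch-microsecond longs.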
json.put("test_string_T_Z", "2022-03-28T18:47:59.01Z"); + json.put("test_long", 1687984085000000L); + json.put("test_int", 153480695); + json.put("test_float", "1.534680695e11"); + json.put("test_offset", "2022-04-05T09:06:11+04:00"); + json.put("test_zero_offset", "2022-03-28T18:47:59.01+00:00"); + json.put("test_timezone", "2022-04-05 09:06:11 PST"); + json.put("test_saformat", "2018/08/19 12:11"); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestTimestampHigherPrecision.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testTimestampRepeated() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_string_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_string_T_Z_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_long_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_int_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_float_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_zero_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_timezone_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_saformat_repeated") + .build()) + .build(); + TestRepeatedTimestamp expectedProto = + TestRepeatedTimestamp.newBuilder() + .addTestStringRepeated(10L) + .addTestStringTZRepeated(1648493279010000L) + .addTestLongRepeated(1687984085000000L) + .addTestIntRepeated(153480695L) + .addTestFloatRepeated(153468069500L) + .addTestOffsetRepeated(1649135171000000L) + .addTestZeroOffsetRepeated(1648493279010000L) + .addTestTimezoneRepeated(1649174771000000L) + .addTestSaformatRepeated(1534680660000000L) + .build(); + JSONObject json = new JSONObject(); + json.put("test_string_repeated", new JSONArray(new String[] {"1970-01-01 00:00:00.000010"})); + json.put("test_string_T_Z_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01Z"})); + json.put("test_long_repeated", new JSONArray(new Long[] {1687984085000000L})); + json.put("test_int_repeated", new JSONArray(new Integer[] {153480695})); + json.put("test_float_repeated", new JSONArray(new String[] {"1.534680695e11"})); + json.put("test_offset_repeated", new JSONArray(new String[] {"2022-04-05T09:06:11+04:00"})); + json.put( + "test_zero_offset_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01+00:00"})); + json.put("test_timezone_repeated", new JSONArray(new String[] {"2022-04-05 09:06:11 PST"})); + json.put("test_saformat_repeated", new JSONArray(new String[] {"2018/08/19 12:11"})); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRepeatedTimestamp.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testTimestampRepeated_higherPrecision() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_string_repeated") + .build()) + 
.addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_string_T_Z_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_long_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_int_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_float_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_zero_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_timezone_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_saformat_repeated") + .build()) + .build(); + + TestRepeatedTimestampHigherPrecision expectedProto = + TestRepeatedTimestampHigherPrecision.newBuilder() + .addTestStringRepeated("1970-01-01T00:00:00.000010+00:00") + .addTestStringTZRepeated("2022-03-28T18:47:59.010000+00:00") + .addTestLongRepeated("2023-06-28T20:28:05.000000+00:00") + .addTestIntRepeated("1970-01-01T00:02:33.480695+00:00") + .addTestFloatRepeated("1970-01-02T18:37:48.069500+00:00") + .addTestOffsetRepeated("2022-04-05T05:06:11.000000+00:00") + .addTestZeroOffsetRepeated("2022-03-28T18:47:59.010000+00:00") + .addTestTimezoneRepeated("2022-04-05T16:06:11.000000+00:00") + .addTestSaformatRepeated("2018-08-19T12:11:00.000000+00:00") + .build(); + JSONObject json = new JSONObject(); + json.put("test_string_repeated", new JSONArray(new String[] {"1970-01-01 00:00:00.000010"})); + json.put("test_string_T_Z_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01Z"})); + json.put("test_long_repeated", new JSONArray(new Long[] {1687984085000000L})); + json.put("test_int_repeated", new JSONArray(new Integer[] {153480695})); + json.put("test_float_repeated", new JSONArray(new String[] {"1.534680695e11"})); + json.put("test_offset_repeated", new JSONArray(new String[] {"2022-04-05T09:06:11+04:00"})); + json.put( + "test_zero_offset_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01+00:00"})); + json.put("test_timezone_repeated", new JSONArray(new String[] {"2022-04-05 09:06:11 PST"})); + json.put("test_saformat_repeated", new JSONArray(new String[] {"2018/08/19 12:11"})); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRepeatedTimestampHigherPrecision.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDate() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder(TEST_DATE).setName("test_string").build()) + .addFields(TableFieldSchema.newBuilder(TEST_DATE).setName("test_long").build()) + .build(); + TestDate expectedProto = TestDate.newBuilder().setTestString(18935).setTestLong(18935).build(); + JSONObject json = new JSONObject(); + json.put("test_string", "2021-11-04"); + json.put("test_long", 18935L); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestDate.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testAllTypes() throws Exception { + for (Map.Entry entry : 
AllTypesToDebugMessageTest.entrySet()) { + int success = 0; + for (JSONObject json : simpleJSONObjects) { + try { + LOG.info("Testing " + json + " over " + entry.getKey().getFullName()); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(entry.getKey(), json); + LOG.info("Convert Success!"); + assertEquals(AllTypesToCorrectProto.get(entry.getKey())[success], protoMsg); + success += 1; + } catch (IllegalArgumentException e) { + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a " + + entry.getValue() + + " field at root.test_field_type.")); + } + } + if (entry.getKey() == DoubleType.getDescriptor()) { + assertEquals(3, success, entry.getKey().getFullName()); + } else if (entry.getKey() == Int64Type.getDescriptor() + || entry.getKey() == BytesType.getDescriptor()) { + assertEquals(2, success, entry.getKey().getFullName()); + } else if (entry.getKey() == StringType.getDescriptor()) { + assertEquals(4, success, entry.getKey().getFullName()); + } else { + assertEquals(1, success, entry.getKey().getFullName()); + } + } + } + + @Test + void testAllRepeatedTypesWithLimits() throws Exception { + for (Map.Entry entry : AllRepeatedTypesToDebugMessageTest.entrySet()) { + int success = 0; + for (JSONObject json : simpleJSONArrays) { + try { + LOG.info("Testing " + json + " over " + entry.getKey().getFullName()); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(entry.getKey(), json); + LOG.info("Convert Success!"); + assertEquals( + AllRepeatedTypesToCorrectProto.get(entry.getKey())[success], + protoMsg, + protoMsg.toString()); + success += 1; + } catch (IllegalArgumentException e) { + LOG.info(e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a " + + entry.getValue() + + " field at root.test_repeated[0].") + || e.getMessage() + .contains("Error: root.test_repeated[0] could not be converted to byte[].")); + } + } + if (entry.getKey() == RepeatedDouble.getDescriptor()) { + assertEquals(4, success, entry.getKey().getFullName()); + } else if (entry.getKey() == RepeatedInt64.getDescriptor()) { + assertEquals(2, success, entry.getKey().getFullName()); + } else if (entry.getKey() == RepeatedString.getDescriptor()) { + assertEquals(4, success, entry.getKey().getFullName()); + } else { + assertEquals(1, success, entry.getKey().getFullName()); + } + } + } + + @Test + void testOptional() throws Exception { + TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).build(); + JSONObject json = new JSONObject(); + json.put("byte", 1); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testRepeatedIsOptional() throws Exception { + TestRepeatedIsOptional expectedProto = + TestRepeatedIsOptional.newBuilder().setRequiredDouble(1.1).build(); + JSONObject json = new JSONObject(); + json.put("required_double", 1.1); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRepeatedIsOptional.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testRequired() throws Exception { + JSONObject json = new JSONObject(); + json.put("optional_double", 1.1); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRequired.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("JSONObject does not have the required field 
root.required_double.")); + } + + @Test + void testRange() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder() + .setName("range_date") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_datetime") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("range_timestamp") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("raNGe_daTE_miXEd_caSE") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("ranGE_daTEtiME_miXEd_caSE") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("raNGe_tiMEstAMp_miXEd_caSE") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .build(); + + TestRange expectedProto = + TestRange.newBuilder() + .setRangeDate(TestRangeDate.newBuilder().setStart(18262).setEnd(18627)) + .setRangeDatetime( + TestRangeDatetime.newBuilder().setStart(1715360343).setEnd(1715446743)) + .setRangeTimestamp( + TestRangeTimestamp.newBuilder().setStart(1715360343).setEnd(1715446743)) + .setRangeDateMixedCase(TestRangeDate.newBuilder().setStart(1).setEnd(2)) + .setRangeDatetimeMixedCase( + TestRangeDatetime.newBuilder() + .setStart(142258614586538368L) + .setEnd(142258525253402624L)) + .setRangeTimestampMixedCase( + TestRangeTimestamp.newBuilder().setStart(10L).setEnd(1649174771000000L)) + .build(); + + JSONArray data = new JSONArray(); + JSONObject row = new JSONObject(); + + JSONObject rangeDate = new JSONObject(); + rangeDate.put("start", 18262); + rangeDate.put("end", 18627); + row.put("range_date", rangeDate); + + JSONObject rangeDatetime = new JSONObject(); + rangeDatetime.put("start", 1715360343); + rangeDatetime.put("end", 1715446743); + row.put("range_datetime", rangeDatetime); + + JSONObject rangeTimestamp = new JSONObject(); + rangeTimestamp.put("start", 1715360343); + rangeTimestamp.put("end", 1715446743); + row.put("range_timestamp", rangeTimestamp); + + JSONObject rangeDateMixedCase = new JSONObject(); + rangeDateMixedCase.put("START", "1970-01-02"); + rangeDateMixedCase.put("eND", "1970-01-03"); + row.put("range_date_mixed_case", rangeDateMixedCase); + + JSONObject rangeDatetimeMixedCase = new JSONObject(); + rangeDatetimeMixedCase.put("STaRT", "2021-09-27T20:51:10.752"); + rangeDatetimeMixedCase.put("END", 
"2021-09-27T00:00:00"); + row.put("range_datetime_mixed_case", rangeDatetimeMixedCase); + + JSONObject rangeTimestampMixedCase = new JSONObject(); + rangeTimestampMixedCase.put("START", "1970-01-01 00:00:00.000010"); + rangeTimestampMixedCase.put("eND", "2022-04-05 09:06:11 PST"); + row.put("range_timestamp_mixed_case", rangeTimestampMixedCase); + + data.put(row); + List protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRange.getDescriptor(), tableSchema, data, false); + assertEquals(expectedProto, protoMsg.get(0)); + } + + @Test + void testStructSimple() throws Exception { + structSimple("test", "test"); + structSimple(true, "true"); + structSimple(1, "1"); + structSimple((short) 1, "1"); + structSimple((long) 1, "1"); + } + + private void structSimple(Object value, String expected) throws Exception { + MessageType expectedProto = + MessageType.newBuilder() + .setTestFieldType(StringType.newBuilder().setTestFieldType(expected).build()) + .build(); + JSONObject stringType = new JSONObject(ImmutableMap.of("test_field_type", value)); + JSONObject json = new JSONObject(ImmutableMap.of("test_field_type", stringType)); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(MessageType.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testStructSimpleFail() throws Exception { + JSONObject stringType = new JSONObject(); + stringType.put("test_field_type", new boolean[0]); + JSONObject json = new JSONObject(); + json.put("test_field_type", stringType); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + MessageType.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at" + + " root.test_field_type.test_field_type.")); + } + + @Test + void testStructComplex() throws Exception { + ComplexRoot expectedProto = + ComplexRoot.newBuilder() + .setTestInt(1) + .addTestString("a") + .addTestString("b") + .addTestString("c") + .setTestBytes(ByteString.copyFrom("hello".getBytes())) + .setTestBool(true) + .addTestDouble(1.1) + .addTestDouble(2.2) + .addTestDouble(3.3) + .addTestDouble(4.4) + .setTestDate(1) + .setTestDatetime(1) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258525253402624L) + .addTestDatetimeStr(142258525253402624L) + .setComplexLvl1( + ComplexLvl1.newBuilder() + .setTestInt(2) + .setComplexLvl2(ComplexLvl2.newBuilder().setTestInt(3).build()) + .build()) + .setComplexLvl2(ComplexLvl2.newBuilder().setTestInt(3).build()) + .setTestNumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23456"))) + .setTestGeo("POINT(1,1)") + .setTestTimestamp(12345678L) + .setTestTime(CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 0, 1))) + .setTestTimeStr(89332507144L) + .addTestNumericRepeated( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0"))) + .addTestNumericRepeated( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("99999999999999999999999999999.999999999"))) + .addTestNumericRepeated( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("-99999999999999999999999999999.999999999"))) + .setTestNumericStr( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("12.4"))) + .setTestNumericShort( + 
BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(1))) + .setTestNumericInt( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(2))) + .setTestNumericLong( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(3L))) + .setTestNumericFloat( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(4f))) + .setTestNumericDouble( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(5D))) + .setTestBignumeric( + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal("578960446186580977117854925043439539266.3222222222"))) + .addTestBignumericStr( + BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal("1.23"))) + .setTestBignumericShort( + BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal(1))) + .setTestBignumericInt( + BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal(2))) + .setTestBignumericLong( + BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal(3L))) + .setTestBignumericFloat( + BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal(4f))) + .setTestBignumericDouble( + BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal(5D))) + .setTestInterval("0-0 0 0:0:0.000005") + .addTestJson("{'a':'b'}") + .setTestTimestampHigherPrecision("2025-12-01 12:34:56.123456789123+00:00") + .build(); + JSONObject complex_lvl2 = new JSONObject(); + complex_lvl2.put("test_int", 3); + + JSONObject complex_lvl1 = new JSONObject(); + complex_lvl1.put("test_int", 2); + complex_lvl1.put("complex_lvl2", complex_lvl2); + + JSONObject json = new JSONObject(); + json.put("test_int", 1); + json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); + json.put("test_bytes", ByteString.copyFromUtf8("hello")); + json.put("test_bool", true); + json.put("test_DOUBLe", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); + json.put("test_date", 1); + json.put("test_datetime", 1); + json.put( + "test_datetime_str", + new JSONArray( + new String[] { + "2021-09-27T20:51:10.752", + "2021-09-27t20:51:10.752", + "2021-09-27 20:51:10.752", + "2021-09-27T00:00:00", + "2021-09-27" + })); + json.put("complex_lvl1", complex_lvl1); + json.put("complex_lvl2", complex_lvl2); + json.put( + "test_numeric", + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23456"))); + json.put( + "test_numeric_repeated", + new JSONArray( + new byte[][] { + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("99999999999999999999999999999.999999999")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("-99999999999999999999999999999.999999999")) + .toByteArray(), + })); + json.put("test_geo", "POINT(1,1)"); + json.put("test_timestamp", 12345678); + json.put( + "test_time", CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 0, 1))); + json.put("test_time_str", "20:51:10.1234"); + json.put("test_numeric_str", "12.4"); + json.put("test_numeric_short", 1); + json.put("test_numeric_int", 2); + json.put("test_numeric_long", 3L); + json.put("test_numeric_float", 4f); + json.put("test_numeric_double", 5D); + json.put( + "test_bignumeric", new BigDecimal("578960446186580977117854925043439539266.3222222222")); + json.put("test_bignumeric_str", new JSONArray(new String[] {"1.23"})); + json.put("test_bignumeric_short", 1); + 
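+ // As with the NUMERIC fields above, plain JSON numbers are accepted for BIGNUMERIC
+ // columns; the expected proto shows them encoded via
+ // BigDecimalByteStringEncoder.encodeToBigNumericByteString(...).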
json.put("test_bignumeric_int", 2); + json.put("test_bignumeric_long", 3L); + json.put("test_bignumeric_float", 4f); + json.put("test_bignumeric_double", 5D); + json.put("test_interval", "0-0 0 0:0:0.000005"); + json.put("test_json", new JSONArray(new String[] {"{'a':'b'}"})); + json.put("test_timestamp_higher_precision", "2025-12-01 12:34:56.123456789123+00:00"); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + ComplexRoot.getDescriptor(), COMPLEX_TABLE_SCHEMA, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testStructComplexFail() throws Exception { + JSONObject complex_lvl2 = new JSONObject(); + complex_lvl2.put("test_int", 3); + + JSONObject complex_lvl1 = new JSONObject(); + complex_lvl1.put("test_int", "not_int"); + complex_lvl1.put("complex_lvl2", complex_lvl2); + + JSONObject json = new JSONObject(); + json.put("test_int", 1); + json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); + json.put("test_bytes", ByteString.copyFromUtf8("hello")); + json.put("test_bool", true); + json.put("test_double", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); + json.put("test_date", 1); + json.put("complex_lvl1", complex_lvl1); + json.put("complex_lvl2", complex_lvl2); + + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + ComplexRoot.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a int64 field at root.complex_lvl1.test_int.")); + } + + @Test + void testRepeatedWithMixedTypes() throws Exception { + JSONObject json = new JSONObject(); + json.put("test_repeated", new JSONArray("[1.1, 2.2, true]")); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedDouble.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a double field at root.test_repeated[2].")); + } + + @Test + void testNestedRepeatedComplex() throws Exception { + NestedRepeated expectedProto = + NestedRepeated.newBuilder() + .addDouble(1.1) + .addDouble(2.2) + .addDouble(3.3) + .addDouble(4.4) + .addDouble(5.5) + .addInt(1) + .addInt(2) + .addInt(3) + .addInt(4) + .addInt(5) + .setRepeatedString( + RepeatedString.newBuilder() + .addTestRepeated("hello") + .addTestRepeated("this") + .addTestRepeated("is") + .addTestRepeated("a") + .addTestRepeated("test") + .build()) + .build(); + double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; + String[] stringArr = {"hello", "this", "is", "a", "test"}; + int[] intArr = {1, 2, 3, 4, 5}; + + JSONObject json = new JSONObject(); + json.put("double", new JSONArray(doubleArr)); + json.put("int", new JSONArray(intArr)); + JSONObject jsonRepeatedString = new JSONObject(); + jsonRepeatedString.put("test_repeated", new JSONArray(stringArr)); + json.put("repeated_string", jsonRepeatedString); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(NestedRepeated.getDescriptor(), json); + assertEquals(protoMsg, expectedProto); + } + + @Test + void testNestedRepeatedComplexFail() throws Exception { + double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; + Boolean[][] fakeStringArr = {new Boolean[0], new Boolean[0]}; + int[] intArr = {1, 2, 3, 4, 5}; + + JSONObject json = new JSONObject(); + json.put("double", new JSONArray(doubleArr)); + json.put("int", new JSONArray(intArr)); + JSONObject jsonRepeatedString = new JSONObject(); + 
jsonRepeatedString.put("test_repeated", new JSONArray(fakeStringArr)); + json.put("repeated_string", jsonRepeatedString); + + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + NestedRepeated.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at" + + " root.repeated_string.test_repeated[0].")); + } + + @Test + void testEmptySecondLevelObject() throws Exception { + ComplexLvl1 expectedProto = + ComplexLvl1.newBuilder() + .setTestInt(1) + .setComplexLvl2(ComplexLvl2.newBuilder().build()) + .build(); + JSONObject complexLvl2 = new JSONObject(); + JSONObject json = new JSONObject(); + json.put("test_int", 1); + json.put("complex_lvl2", complexLvl2); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexLvl1.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testAllowUnknownFieldsError() throws Exception { + JSONObject json = new JSONObject(); + json.put("test_repeated", new JSONArray(new int[] {1, 2, 3, 4, 5})); + json.put("string", "hello"); + + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedInt64.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("The source object has fields unknown to BigQuery: " + "root.string.")); + } + + @Test + void testEmptyProtoMessage() throws Exception { + JSONObject json = new JSONObject(); + json.put("test_repeated", new JSONArray(new int[0])); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt64.getDescriptor(), json); + assertEquals(protoMsg.getAllFields().size(), 0); + } + + @Test + void testEmptyJSONObject() throws Exception { + JSONObject json = new JSONObject(); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage(Int64Type.getDescriptor(), json)); + assertEquals("JSONObject is empty.", e.getMessage()); + } + + @Test + void testNullJson() throws Exception { + NullPointerException e = + assertThrows( + NullPointerException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage(Int64Type.getDescriptor(), null)); + assertEquals("JSONObject is null.", e.getMessage()); + } + + @Test + void testNullDescriptor() throws Exception { + NullPointerException e = + assertThrows( + NullPointerException.class, + () -> JsonToProtoMessage.INSTANCE.convertToProtoMessage(null, new JSONObject())); + assertEquals("Protobuf descriptor is null.", e.getMessage()); + } + + @Test + void testAllowUnknownFieldsSecondLevel() throws Exception { + JSONObject complex_lvl2 = new JSONObject(); + complex_lvl2.put("no_match", 1); + JSONObject json = new JSONObject(); + json.put("test_int", 1); + json.put("complex_lvl2", complex_lvl2); + + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + ComplexLvl1.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains( + "The source object has fields unknown to BigQuery: root.complex_lvl2.no_match.")); + } + + @Test + void testTopLevelMatchSecondLevelMismatch() throws Exception { + ComplexLvl1 expectedProto = + ComplexLvl1.newBuilder() + .setTestInt(1) + .setComplexLvl2(ComplexLvl2.newBuilder().build()) + .build(); + JSONObject complex_lvl2 = new JSONObject(); + JSONObject json = new 
JSONObject(); + json.put("test_int", 1); + json.put("complex_lvl2", complex_lvl2); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexLvl1.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testJsonNullValue() throws Exception { + TestInt64 expectedProto = TestInt64.newBuilder().setInt(1).build(); + JSONObject json = new JSONObject(); + json.put("long", JSONObject.NULL); + json.put("int", 1); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testJsonAllFieldsNullValue() throws Exception { + TestInt64 expectedProto = TestInt64.newBuilder().build(); + JSONObject json = new JSONObject(); + json.put("long", JSONObject.NULL); + json.put("int", JSONObject.NULL); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt64.getDescriptor(), json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testBadJsonFieldRepeated() throws Exception { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("test_repeated") + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .build()) + .build(); + JSONObject json = new JSONObject(); + json.put("test_repeated", new JSONArray(new String[] {"123", "blah"})); + + RowIndexToErrorException ex = + assertThrows( + RowIndexToErrorException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedBytes.getDescriptor(), ts, json)); + assertTrue(ex.rowIndexToErrorMessage.size() == 1); + assertTrue(ex.getMessage().contains("root.test_repeated failed to convert to NUMERIC.")); + } + + @Test + void testBadJsonFieldIntRepeated() throws Exception { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("test_repeated") + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.REPEATED) + .build()) + .build(); + JSONObject json = new JSONObject(); + json.put("test_repeated", new JSONArray(new String[] {"blah"})); + + IllegalArgumentException ex = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedInt32.getDescriptor(), ts, json)); + assertTrue(ex.getMessage().contains("Text 'blah' could not be parsed at index 0")); + } + + @Test + void testNullRepeatedField() throws Exception { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("test_repeated") + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.REPEATED) + .build()) + .addFields( + 1, + TableFieldSchema.newBuilder() + .setName("test_non_repeated") + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .build(); + JSONObject json = new JSONObject(); + // Null repeated field. + json.put("test_repeated", JSONObject.NULL); + + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt32.getDescriptor(), ts, json); + assertTrue(protoMsg.getAllFields().isEmpty()); + + // Missing repeated field. 
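+    // Omitting the repeated key entirely behaves the same as the explicit
+    // JSONObject.NULL above: the proto field is left unset, so no fields are populated.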
+ json = new JSONObject(); + json.put("test_non_repeated", JSONObject.NULL); + + protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt32.getDescriptor(), ts, json); + assertTrue(protoMsg.getAllFields().isEmpty()); + } + + @Test + void testDoubleAndFloatToNumericConversion() { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("numeric") + .setType(TableFieldSchema.Type.NUMERIC) + .build()) + .build(); + TestNumeric expectedProto = + TestNumeric.newBuilder() + .setNumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("24.678"))) + .build(); + JSONObject json = new JSONObject(); + json.put("numeric", new Double(24.678)); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestNumeric.getDescriptor(), ts, json); + assertEquals(expectedProto, protoMsg); + json.put("numeric", new Float(24.678)); + protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestNumeric.getDescriptor(), ts, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDoubleAndFloatToNumericConversionWithJsonArray() { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("numeric") + .setType(TableFieldSchema.Type.NUMERIC) + .build()) + .build(); + List protoList = new ArrayList<>(); + int protoNum = 10; + for (int i = 0; i < protoNum; i++) { + protoList.add( + TestNumeric.newBuilder() + .setNumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("24.678" + i))) + .build()); + } + + JSONArray doubleJsonArray = new JSONArray(); + JSONArray floatJsonArray = new JSONArray(); + for (int i = 0; i < protoNum; i++) { + JSONObject doubleJson = new JSONObject(); + doubleJson.put("numeric", new Double(24.678 + (i * 0.0001))); + doubleJsonArray.put(doubleJson); + + JSONObject floatJson = new JSONObject(); + floatJson.put("numeric", new Float(24.678 + (i * 0.0001))); + floatJsonArray.put(floatJson); + } + + List protoMsgList = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), ts, doubleJsonArray, false); + assertEquals(protoList, protoMsgList); + + protoMsgList = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), ts, floatJsonArray, false); + assertEquals(protoList, protoMsgList); + } + + @Test + void testBigDecimalToBigNumericConversion() { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("bignumeric") + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .build()) + .build(); + TestBignumeric expectedProto = + TestBignumeric.newBuilder() + .addBignumeric( + BigDecimalByteStringEncoder.encodeToBigNumericByteString( + new BigDecimal("24.6789012345"))) + .build(); + JSONObject json = new JSONObject(); + json.put("bignumeric", Collections.singletonList(new BigDecimal("24.6789012345"))); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestBignumeric.getDescriptor(), ts, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDoubleAndFloatToRepeatedBigNumericConversion() { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("bignumeric") + .setType(TableFieldSchema.Type.BIGNUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .build()) + .build(); + TestBignumeric expectedProto = + TestBignumeric.newBuilder() + .addBignumeric( + 
BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal("24.678"))) + .build(); + JSONObject json = new JSONObject(); + json.put("bignumeric", Collections.singletonList(new Double(24.678))); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestBignumeric.getDescriptor(), ts, json); + assertEquals(expectedProto, protoMsg); + json.put("bignumeric", Collections.singletonList(new Float(24.678))); + protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestBignumeric.getDescriptor(), ts, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testGetTimestampAsString() { + // String case must be in ISO8601 format + assertEquals( + "2025-10-01T12:34:56.123456+00:00", + JsonToProtoMessage.getTimestampAsString("2025-10-01 12:34:56.123456+00:00")); + assertEquals( + "2025-10-01T12:34:56.123456789123+00:00", + JsonToProtoMessage.getTimestampAsString("2025-10-01T12:34:56.123456789123+00:00")); + + // Numeric case must be micros from epoch + assertEquals("1970-01-01T00:00:00.000001+00:00", JsonToProtoMessage.getTimestampAsString(1L)); + assertEquals("1969-12-31T23:59:59.999999+00:00", JsonToProtoMessage.getTimestampAsString(-1L)); + assertEquals( + "1970-01-01T00:00:00.001234+00:00", JsonToProtoMessage.getTimestampAsString("1234")); + assertEquals("1970-01-01T00:00:00.000010+00:00", JsonToProtoMessage.getTimestampAsString(10.4)); + assertEquals( + "1969-12-31T23:59:59.999000+00:00", JsonToProtoMessage.getTimestampAsString("-1000.4")); + + // Protobuf timestamp format is converted to ISO8601 string + assertEquals( + "1970-01-02T10:17:36.000123456+00:00", + JsonToProtoMessage.getTimestampAsString( + Timestamp.newBuilder().setSeconds(123456).setNanos(123456).build())); + assertEquals( + "1969-12-30T13:42:23.999876544+00:00", + JsonToProtoMessage.getTimestampAsString( + Timestamp.newBuilder().setSeconds(-123456).setNanos(-123456).build())); + + assertThrows( + IllegalArgumentException.class, + () -> JsonToProtoMessage.getTimestampAsString("2025-10-01")); + assertThrows( + IllegalArgumentException.class, () -> JsonToProtoMessage.getTimestampAsString("abc")); + assertThrows( + IllegalArgumentException.class, + () -> JsonToProtoMessage.getTimestampAsString(Timestamp.newBuilder())); + assertThrows( + IllegalArgumentException.class, + () -> JsonToProtoMessage.getTimestampAsString(new Object())); + assertThrows( + IllegalArgumentException.class, () -> JsonToProtoMessage.getTimestampAsString(null)); + } + + @Test + void testFromEpochMicros() { + // The `+` is added if there are more than 4 digits for years + assertEquals( + "+294247-01-10T04:00:54.775807Z", + JsonToProtoMessage.fromEpochMicros(Long.MAX_VALUE).toString()); + assertEquals( + "-290308-12-21T19:59:05.224192Z", + JsonToProtoMessage.fromEpochMicros(Long.MIN_VALUE).toString()); + assertEquals(Instant.EPOCH.toString(), JsonToProtoMessage.fromEpochMicros(0L).toString()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java new file mode 100644 index 000000000000..731000e6a3a0 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryRead implements MockGrpcService { + private final MockBigQueryReadImpl serviceImpl; + + public MockBigQueryRead() { + serviceImpl = new MockBigQueryReadImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java new file mode 100644 index 000000000000..420ce9c07594 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java @@ -0,0 +1,122 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1.BigQueryReadGrpc.BigQueryReadImplBase; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryReadImpl extends BigQueryReadImplBase { + private List requests; + private Queue responses; + + public MockBigQueryReadImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createReadSession( + CreateReadSessionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ReadSession) { + requests.add(request); + responseObserver.onNext(((ReadSession) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateReadSession, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ReadSession.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void readRows(ReadRowsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ReadRowsResponse) { + requests.add(request); + responseObserver.onNext(((ReadRowsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ReadRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ReadRowsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void splitReadStream( + SplitReadStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof SplitReadStreamResponse) { + requests.add(request); + responseObserver.onNext(((SplitReadStreamResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SplitReadStream, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + SplitReadStreamResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java new file mode 100644 index 000000000000..03737b87ef9f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryWrite implements MockGrpcService { + private final MockBigQueryWriteImpl serviceImpl; + + public MockBigQueryWrite() { + serviceImpl = new MockBigQueryWriteImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java new file mode 100644 index 000000000000..23723fb92152 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java @@ -0,0 +1,205 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteGrpc.BigQueryWriteImplBase; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryWriteImpl extends BigQueryWriteImplBase { + private List requests; + private Queue responses; + + public MockBigQueryWriteImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createWriteStream( + CreateWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext(((WriteStream) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateWriteStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + WriteStream.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver appendRows( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(AppendRowsRequest value) { + requests.add(value); + final Object response = responses.remove(); + if (response instanceof AppendRowsResponse) { + responseObserver.onNext(((AppendRowsResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method AppendRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + AppendRowsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override + public void getWriteStream( + GetWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext(((WriteStream) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetWriteStream, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + WriteStream.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void finalizeWriteStream( + FinalizeWriteStreamRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof FinalizeWriteStreamResponse) { + requests.add(request); + responseObserver.onNext(((FinalizeWriteStreamResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FinalizeWriteStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + FinalizeWriteStreamResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchCommitWriteStreamsResponse) { + requests.add(request); + responseObserver.onNext(((BatchCommitWriteStreamsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + BatchCommitWriteStreamsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void flushRows( + FlushRowsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof FlushRowsResponse) { + requests.add(request); + responseObserver.onNext(((FlushRowsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FlushRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + FlushRowsResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java new file mode 100644 index 000000000000..2f60adaaad00 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java @@ -0,0 +1,190 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.cloud.bigquery.storage.test.Test.*; +import com.google.protobuf.DescriptorProtos.FileDescriptorProto; +import com.google.protobuf.Descriptors; +import org.junit.jupiter.api.Test; + +public class ProtoSchemaConverterTest { + @Test + void convertSimple() { + AllSupportedTypes testProto = AllSupportedTypes.newBuilder().setStringValue("abc").build(); + ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); + assertEquals( + "name: \"com_google_cloud_bigquery_storage_test_AllSupportedTypes\"\n" + + "field {\n" + + " name: \"int32_value\"\n" + + " number: 1\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_INT32\n" + + "}\n" + + "field {\n" + + " name: \"int64_value\"\n" + + " number: 2\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_INT64\n" + + "}\n" + + "field {\n" + + " name: \"uint32_value\"\n" + + " number: 3\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_UINT32\n" + + "}\n" + + "field {\n" + + " name: \"uint64_value\"\n" + + " number: 4\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_UINT64\n" + + "}\n" + + "field {\n" + + " name: \"float_value\"\n" + + " number: 5\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_FLOAT\n" + + "}\n" + + "field {\n" + + " name: \"double_value\"\n" + + " number: 6\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_DOUBLE\n" + + "}\n" + + "field {\n" + + " name: \"bool_value\"\n" + + " number: 7\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_BOOL\n" + + "}\n" + + "field {\n" + + " name: \"enum_value\"\n" + + " number: 8\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_ENUM\n" + + " type_name: \"com_google_cloud_bigquery_storage_test_TestEnum_E.TestEnum\"\n" + + "}\n" + + "field {\n" + + " name: \"string_value\"\n" + + " number: 9\n" + + " label: LABEL_REQUIRED\n" + + " type: TYPE_STRING\n" + + "}\n" + + "nested_type {\n" + + " name: \"com_google_cloud_bigquery_storage_test_TestEnum_E\"\n" + + " enum_type {\n" + + " name: \"TestEnum\"\n" + + " value {\n" + + " name: \"TestEnum0\"\n" + + " number: 0\n" + + " }\n" + + " value {\n" + + " name: \"TestEnum1\"\n" + + " number: 1\n" + + " }\n" + + " }\n" + + "}\n", + protoSchema.getProtoDescriptor().toString()); + } + + @Test + void convertNested() { + ComplicateType testProto = ComplicateType.newBuilder().build(); + ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); + assertEquals( + "name: \"com_google_cloud_bigquery_storage_test_ComplicateType\"\n" + + "field {\n" + + " name: \"nested_repeated_type\"\n" + + " number: 1\n" + + " label: LABEL_REPEATED\n" + + " type: TYPE_MESSAGE\n" + + " type_name: \"com_google_cloud_bigquery_storage_test_NestedType\"\n" + + "}\n" + + "field {\n" + + " name: \"inner_type\"\n" + + " number: 2\n" + + " label: LABEL_OPTIONAL\n" + + " type: TYPE_MESSAGE\n" + + " type_name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" + + "}\n" + + "nested_type {\n" + + " name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" + + " field {\n" + + " name: \"value\"\n" + + " number: 1\n" + + " label: LABEL_REPEATED\n" + + " type: TYPE_STRING\n" + + " }\n" + + "}\n" + + "nested_type {\n" + + " name: \"com_google_cloud_bigquery_storage_test_NestedType\"\n" + + " field {\n" + + " name: \"inner_type\"\n" + + " number: 1\n" + + " label: 
LABEL_REPEATED\n" + + " type: TYPE_MESSAGE\n" + + " type_name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" + + " }\n" + + "}\n", + protoSchema.getProtoDescriptor().toString()); + } + + @Test + void convertRecursive() { + RecursiveType testProto = RecursiveType.newBuilder().build(); + InvalidArgumentException e = + assertThrows( + InvalidArgumentException.class, + () -> ProtoSchemaConverter.convert(testProto.getDescriptorForType())); + assertEquals( + "Recursive type is not supported:com.google.cloud.bigquery.storage.test.RecursiveType", + e.getMessage()); + } + + @Test + void convertRecursiveTopMessage() { + RecursiveTypeTopMessage testProto = RecursiveTypeTopMessage.newBuilder().build(); + InvalidArgumentException e = + assertThrows( + InvalidArgumentException.class, + () -> ProtoSchemaConverter.convert(testProto.getDescriptorForType())); + assertEquals( + "Recursive type is not" + + " supported:com.google.cloud.bigquery.storage.test.RecursiveTypeTopMessage", + e.getMessage()); + } + + @Test + void convertDuplicateType() throws Descriptors.DescriptorValidationException { + DuplicateType testProto = DuplicateType.newBuilder().build(); + ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); + + FileDescriptorProto fileDescriptorProto = + FileDescriptorProto.newBuilder() + .setName("foo.proto") + .addMessageType(protoSchema.getProtoDescriptor()) + .build(); + Descriptors.FileDescriptor fs = + Descriptors.FileDescriptor.buildFrom( + fileDescriptorProto, new Descriptors.FileDescriptor[0]); + Descriptors.Descriptor type = + fs.findMessageTypeByName(protoSchema.getProtoDescriptor().getName()); + assertEquals(4, type.getFields().size()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java new file mode 100644 index 000000000000..d9dc1ed6353c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java @@ -0,0 +1,221 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.cloud.bigquery.storage.v1.RequestProfiler.OperationName; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class RequestProfilerTest { + private static final Logger log = Logger.getLogger(RequestProfiler.class.getName()); + + private RequestProfiler.RequestProfilerHook profilerHook = + new RequestProfiler.RequestProfilerHook(true); + + @BeforeEach + void setup() { + RequestProfiler.disableAndResetProfiler(); + profilerHook.enableProfiler(); + } + + @AfterEach + void close() { + RequestProfiler.disableAndResetProfiler(); + } + + @Test + void testNormalCase() throws Exception { + profilerHook.startOperation(OperationName.TOTAL_LATENCY, "request_1"); + profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1"); + profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1"); + profilerHook.startOperation(OperationName.RESPONSE_LATENCY, "request_1"); + + // Another request starts in the middle + profilerHook.startOperation(OperationName.TOTAL_LATENCY, "request_2"); + profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_2"); + profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_2"); + + // Continue request 1 + profilerHook.endOperation(OperationName.RESPONSE_LATENCY, "request_1"); + + // Continue request 2 + profilerHook.startOperation(OperationName.RESPONSE_LATENCY, "request_2"); + profilerHook.endOperation(OperationName.RESPONSE_LATENCY, "request_2"); + profilerHook.endOperation(OperationName.TOTAL_LATENCY, "request_2"); + + // Continue request 1 + profilerHook.endOperation(OperationName.TOTAL_LATENCY, "request_1"); + + // Test the report generated. + String reportText = profilerHook.flushAndGenerateReportText(); + log.info(reportText); + assertTrue(reportText.contains("Request uuid: request_1 with total time")); + assertTrue(reportText.contains("Operation name json_to_proto_conversion starts at")); + assertTrue(reportText.contains("Operation name response_latency starts at")); + assertTrue(reportText.contains("Request uuid: request_2 with total time")); + + // Second time flush is called, it should generate empty report. + reportText = profilerHook.flushAndGenerateReportText(); + assertTrue(reportText.contains("0 requests finished during")); + } + + @Test + void mixFinishedAndUnfinishedRequest() throws Exception { + // Start request 1. 
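+    // Operations for two request uuids are interleaved below to verify that the
+    // profiler tracks each request by uuid, independently of call order.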
+    profilerHook.startOperation(OperationName.TOTAL_LATENCY, "request_1");
+    profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1");
+    profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1");
+    profilerHook.startOperation(OperationName.RESPONSE_LATENCY, "request_1");
+
+    // Another request starts in the middle
+    profilerHook.startOperation(OperationName.TOTAL_LATENCY, "request_2");
+    profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_2");
+
+    // First report should be empty since no requests have ended.
+    String reportText = profilerHook.flushAndGenerateReportText();
+    assertTrue(reportText.contains("0 requests finished during"));
+
+    // End one of them.
+    profilerHook.endOperation(OperationName.TOTAL_LATENCY, "request_1");
+    reportText = profilerHook.flushAndGenerateReportText();
+    assertTrue(reportText.contains("Request uuid: request_1 with total time"));
+
+    // End the other; the first request's log should no longer show up.
+    profilerHook.endOperation(OperationName.TOTAL_LATENCY, "request_2");
+    reportText = profilerHook.flushAndGenerateReportText();
+    assertTrue(!reportText.contains("Request uuid: request_1 with total time"));
+    assertTrue(reportText.contains("Request uuid: request_2 with total time"));
+
+    // Flushing again yields an empty report.
+    reportText = profilerHook.flushAndGenerateReportText();
+    assertTrue(reportText.contains("0 requests finished during"));
+  }
+
+  @Test
+  void concurrentProfilingTest_1000ReqsRunTogether() throws Exception {
+    int totalRequest = 1000;
+    ListeningExecutorService threadPool =
+        MoreExecutors.listeningDecorator(
+            Executors.newCachedThreadPool(
+                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("TestThread").build()));
+
+    List<Future<?>> futures = new ArrayList<>();
+    // Make some requests particularly slower than the others.
+    ImmutableSet<Integer> slowRequestIndex = ImmutableSet.of(10, 15, 20, 25, 30, 40, 50);
+    for (int i = 0; i < totalRequest; i++) {
+      int finalI = i;
+      futures.add(
+          threadPool.submit(
+              () -> {
+                String uuid = String.format("request_%s", finalI);
+                profilerHook.startOperation(OperationName.TOTAL_LATENCY, uuid);
+                profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                if (slowRequestIndex.contains(finalI)) {
+                  try {
+                    TimeUnit.MILLISECONDS.sleep(finalI * 100);
+                  } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                  }
+                }
+                profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                profilerHook.startOperation(OperationName.WAIT_QUEUE, uuid);
+                profilerHook.endOperation(OperationName.WAIT_QUEUE, uuid);
+                profilerHook.endOperation(OperationName.TOTAL_LATENCY, uuid);
+              }));
+    }
+
+    // Wait for all requests to finish.
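+    // Future.get() blocks until the worker completes and rethrows any worker
+    // exception wrapped in an ExecutionException, so a failed profiler call in a
+    // worker thread fails the test instead of being silently dropped.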
+    for (int i = 0; i < futures.size(); i++) {
+      futures.get(i).get();
+    }
+    String reportText = profilerHook.flushAndGenerateReportText();
+    assertTrue(reportText.contains("During the last 60000 milliseconds at system time"));
+    assertTrue(reportText.contains("in total 1000 requests finished"));
+    assertTrue(reportText.contains("Request uuid: request_50 with total time"));
+    assertTrue(reportText.contains("Request uuid: request_40 with total time"));
+    assertTrue(reportText.contains("Request uuid: request_30 with total time"));
+    assertTrue(reportText.contains("Request uuid: request_25 with total time"));
+    assertTrue(reportText.contains("Request uuid: request_20 with total time"));
+
+    threadPool.shutdown();
+    threadPool.awaitTermination(10, TimeUnit.SECONDS);
+  }
+
+  @Test
+  void concurrentProfilingTest_RunWhileFlushing() throws Exception {
+    int totalRequest = 1000;
+    ListeningExecutorService threadPool =
+        MoreExecutors.listeningDecorator(
+            Executors.newCachedThreadPool(
+                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("TestThread").build()));
+
+    List<Future<?>> futures = new ArrayList<>();
+    // Make some requests particularly slower than the others.
+    ImmutableSet<Integer> slowRequestIndex = ImmutableSet.of(10, 15, 20, 25, 30, 40, 50);
+    for (int i = 0; i < totalRequest; i++) {
+      int finalI = i;
+      futures.add(
+          threadPool.submit(
+              () -> {
+                try {
+                  String uuid = String.format("request_%s", finalI);
+                  profilerHook.startOperation(OperationName.TOTAL_LATENCY, uuid);
+                  profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                  if (slowRequestIndex.contains(finalI)) {
+                    TimeUnit.MILLISECONDS.sleep(finalI * 100);
+                  }
+                  profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                  profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                  profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, uuid);
+                  profilerHook.startOperation(OperationName.WAIT_QUEUE, uuid);
+                  profilerHook.endOperation(OperationName.WAIT_QUEUE, uuid);
+                  profilerHook.endOperation(OperationName.TOTAL_LATENCY, uuid);
+                  String unused = profilerHook.flushAndGenerateReportText();
+                } catch (InterruptedException e) {
+                  throw new RuntimeException(e);
+                }
+              }));
+    }
+
+    // Wait for all requests to finish.
+    for (int i = 0; i < futures.size(); i++) {
+      futures.get(i).get();
+    }
+    String reportText = profilerHook.flushAndGenerateReportText();
+    assertTrue(reportText.contains("0 requests finished during"));
+
+    threadPool.shutdown();
+    threadPool.awaitTermination(10, TimeUnit.SECONDS);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java
new file mode 100644
index 000000000000..015d0fabc0dc
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java
@@ -0,0 +1,2594 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.client.util.Sleeper; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.batching.FlowController; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.api.gax.rpc.UnknownException; +import com.google.auth.oauth2.UserCredentials; +import com.google.cloud.bigquery.storage.test.Test.FooType; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation; +import com.google.cloud.bigquery.storage.v1.ConnectionWorkerPool.Settings; +import com.google.cloud.bigquery.storage.v1.Exceptions.StreamWriterClosedException; +import com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode; +import com.google.cloud.bigquery.storage.v1.StreamWriter.SingleConnectionOrConnectionPool.Kind; +import com.google.common.base.Strings; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.DescriptorProtos; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Timestamp; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.opentelemetry.api.common.Attributes; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; +import java.util.logging.Logger; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.VarCharVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.VectorUnloader; +import org.apache.arrow.vector.compression.CompressionCodec; +import org.apache.arrow.vector.compression.CompressionUtil; +import 
org.apache.arrow.vector.compression.NoCompressionCodec; +import org.apache.arrow.vector.ipc.WriteChannel; +import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; +import org.apache.arrow.vector.ipc.message.MessageSerializer; +import org.apache.arrow.vector.types.pojo.ArrowType; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.arrow.vector.types.pojo.Schema; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class StreamWriterTest { + + private static final Logger log = Logger.getLogger(StreamWriterTest.class.getName()); + private static final String TEST_STREAM_1 = "projects/p/datasets/d1/tables/t1/streams/_default"; + private static final String TEST_STREAM_2 = "projects/p/datasets/d2/tables/t2/streams/_default"; + private static final String TEST_STREAM_3 = "projects/p/datasets/d3/tables/t3/streams/_default"; + private static final String TEST_STREAM_SHORTEN = "projects/p/datasets/d2/tables/t2/_default"; + private static final String EXPLICIT_STREAM = "projects/p/datasets/d1/tables/t1/streams/s1"; + private static final String TEST_TRACE_ID = "DATAFLOW:job_id"; + private static final int MAX_RETRY_NUM_ATTEMPTS = 3; + private static final long INITIAL_RETRY_MILLIS = 500; + private static final double RETRY_MULTIPLIER = 1.3; + private static final int MAX_RETRY_DELAY_MINUTES = 5; + private static final RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(java.time.Duration.ofMillis(INITIAL_RETRY_MILLIS)) + .setRetryDelayMultiplier(RETRY_MULTIPLIER) + .setMaxAttempts(MAX_RETRY_NUM_ATTEMPTS) + .setMaxRetryDelayDuration(java.time.Duration.ofMinutes(MAX_RETRY_DELAY_MINUTES)) + .build(); + private FakeScheduledExecutorService fakeExecutor; + private FakeBigQueryWrite testBigQueryWrite; + private static MockServiceHelper serviceHelper; + private BigQueryWriteClient client; + private final TableFieldSchema FOO = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("foo") + .build(); + private final TableFieldSchema BAR = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("bar") + .build(); + private final TableSchema TABLE_SCHEMA = TableSchema.newBuilder().addFields(0, FOO).build(); + private final ProtoSchema PROTO_SCHEMA = + ProtoSchemaConverter.convert( + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(TABLE_SCHEMA)); + + private final Schema ARROW_SCHEMA; + private final ArrowSchema SERIALIZED_ARROW_SCHEMA; + + private final TableSchema UPDATED_TABLE_SCHEMA = + TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).build(); + private final ProtoSchema UPDATED_PROTO_SCHEMA = + ProtoSchemaConverter.convert( + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor( + UPDATED_TABLE_SCHEMA)); + private static final BufferAllocator allocator = new RootAllocator(); + + StreamWriterTest() throws DescriptorValidationException { + Field foo = new Field("foo", FieldType.nullable(new ArrowType.Utf8()), null); + ARROW_SCHEMA = new Schema(Arrays.asList(foo)); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + MessageSerializer.serialize(new 
WriteChannel(Channels.newChannel(out)), ARROW_SCHEMA); + } catch (IOException e) { + throw new IllegalStateException("Failed to serialize arrow schema.", e); + } + byte[] bytes = out.toByteArray(); + SERIALIZED_ARROW_SCHEMA = + ArrowSchema.newBuilder().setSerializedSchema(ByteString.copyFrom(bytes)).build(); + } + + @BeforeEach + void setUp() throws Exception { + testBigQueryWrite = new FakeBigQueryWrite(); + StreamWriter.setMaxRequestCallbackWaitTime(java.time.Duration.ofSeconds(10000)); + ConnectionWorker.setMaxInflightQueueWaitTime(300000); + serviceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite)); + serviceHelper.start(); + fakeExecutor = new FakeScheduledExecutorService(); + testBigQueryWrite.setExecutor(fakeExecutor); + client = + BigQueryWriteClient.create( + BigQueryWriteSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setTransportChannelProvider(serviceHelper.createChannelProvider()) + .build()); + StreamWriter.cleanUp(); + } + + @AfterEach + void tearDown() throws Exception { + log.info("tearDown called"); + serviceHelper.stop(); + StreamWriter.cleanUp(); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + private StreamWriter getMultiplexingTestStreamWriter() throws IOException { + return StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setTraceId(TEST_TRACE_ID) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + // We won't test profiler behavior in this test, it's a sanity check. + .setEnableLatencyProfiler(true) + .build(); + } + + private StreamWriter.Builder getTestStreamWriterBuilder() throws IOException { + return StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setTraceId(TEST_TRACE_ID) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)); + } + + private StreamWriter getTestStreamWriter() throws IOException { + return getTestStreamWriterBuilder().build(); + } + + private StreamWriter getTestStreamWriterRetryEnabled() throws IOException { + return StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setTraceId(TEST_TRACE_ID) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + .setRetrySettings(retrySettings) + .build(); + } + + private StreamWriter getTestStreamWriterExclusiveRetryEnabled() throws IOException { + return StreamWriter.newBuilder(EXPLICIT_STREAM, client) + .setWriterSchema(createProtoSchema()) + .setTraceId(TEST_TRACE_ID) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + .setRetrySettings(retrySettings) + .build(); + } + + private StreamWriter getTestStreamWriterExclusiveRetryEnabledWithArrowSchema() + throws IOException { + return StreamWriter.newBuilder(EXPLICIT_STREAM, client) + .setWriterSchema(SERIALIZED_ARROW_SCHEMA) + .setTraceId(TEST_TRACE_ID) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + .setRetrySettings(retrySettings) + .build(); + } + + private StreamWriter getTestStreamWriterExclusiveRetryEnabledWithUnserialiedArrowSchema() + throws IOException { + return StreamWriter.newBuilder(EXPLICIT_STREAM, client) + .setWriterSchema(ARROW_SCHEMA) + .setTraceId(TEST_TRACE_ID) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + .setRetrySettings(retrySettings) + .build(); + } + + private ProtoSchema createProtoSchema() { + return createProtoSchema("foo"); + } + + private ProtoSchema createProtoSchema(String fieldName) { + return 
ProtoSchema.newBuilder() + .setProtoDescriptor( + DescriptorProtos.DescriptorProto.newBuilder() + .setName("Message") + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(fieldName) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING) + .setNumber(1) + .build()) + .build()) + .build(); + } + + private ProtoRows createProtoRows(String[] messages) { + ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); + for (String message : messages) { + FooType foo = FooType.newBuilder().setFoo(message).build(); + rowsBuilder.addSerializedRows(foo.toByteString()); + } + return rowsBuilder.build(); + } + + private com.google.cloud.bigquery.storage.v1.ArrowRecordBatch createArrowRecordBatch( + String[] messages) { + // Mostly copied from arrow public doc, however we can't directly use the ArrowStreamWriter + // as we need the raw record batch instead of IPC stream format. + try (VectorSchemaRoot vectorSchemaRoot = VectorSchemaRoot.create(ARROW_SCHEMA, allocator)) { + VarCharVector fooVector = (VarCharVector) vectorSchemaRoot.getVector("foo"); + fooVector.allocateNew(messages.length); + for (int i = 0; i < messages.length; i++) { + fooVector.set(i, messages[i].getBytes(UTF_8)); + } + vectorSchemaRoot.setRowCount(messages.length); + + CompressionCodec codec = + NoCompressionCodec.Factory.INSTANCE.createCodec(CompressionUtil.CodecType.NO_COMPRESSION); + VectorUnloader vectorUnloader = + new VectorUnloader( + vectorSchemaRoot, /* includeNullCount= */ true, codec, /* alignBuffers= */ true); + ArrowRecordBatch recordBatch = vectorUnloader.getRecordBatch(); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), recordBatch); + } catch (IOException e) { + throw new IllegalStateException("Failed to serialize arrow rows.", e); + } + ByteString serializedArrowRows = ByteString.copyFrom(out.toByteArray()); + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder recordBatchBuilder = + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.newBuilder() + .setSerializedRecordBatch(serializedArrowRows); + + return recordBatchBuilder.build(); + } + } + + private com.google.cloud.bigquery.storage.v1.ArrowRecordBatch createArrowRecordBatch( + Schema arrowSchema, BufferAllocator allocator, String[] messages) { + try (VectorSchemaRoot vectorSchemaRoot = VectorSchemaRoot.create(arrowSchema, allocator)) { + VarCharVector varCharVector = (VarCharVector) vectorSchemaRoot.getVector(0); + varCharVector.allocateNew(messages.length); + for (int i = 0; i < messages.length; i++) { + varCharVector.set(i, messages[i].getBytes(UTF_8)); + } + vectorSchemaRoot.setRowCount(messages.length); + + VectorUnloader vectorUnloader = new VectorUnloader(vectorSchemaRoot); + try (final ArrowRecordBatch recordBatch = vectorUnloader.getRecordBatch()) { + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), recordBatch); + ByteString serialized = ByteString.copyFrom(out.toByteArray()); + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.newBuilder() + .setSerializedRecordBatch(serialized) + .build(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private AppendRowsResponse createAppendResponse(long offset) { + return AppendRowsResponse.newBuilder() + .setAppendResult( + AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(offset)).build()) + .build(); + } + + private 
AppendRowsResponse createAppendResponseWithError(Status.Code code, String message) {
+    return AppendRowsResponse.newBuilder()
+        .setError(com.google.rpc.Status.newBuilder().setCode(code.value()).setMessage(message))
+        .build();
+  }
+
+  private ApiFuture<AppendRowsResponse> sendTestMessage(StreamWriter writer, String[] messages) {
+    return writer.append(createProtoRows(messages));
+  }
+
+  private ApiFuture<AppendRowsResponse> sendTestMessage(
+      StreamWriter writer, String[] messages, long offset) {
+    return writer.append(createProtoRows(messages), offset);
+  }
+
+  private static <T extends Throwable> T assertFutureException(
+      Class<T> expectedThrowable, final Future<?> future) {
+    return assertThrows(
+        expectedThrowable,
+        () -> {
+          try {
+            future.get();
+          } catch (ExecutionException ex) {
+            // The Future wraps the exception in an ExecutionException, so unwrap it here.
+            throw ex.getCause();
+          }
+        });
+  }
+
+  private void verifyAppendIsBlocked(final StreamWriter writer) throws Exception {
+    Thread appendThread =
+        new Thread(
+            new Runnable() {
+              @Override
+              public void run() {
+                sendTestMessage(writer, new String[] {"A"});
+              }
+            });
+    // Start a separate thread to append, and verify that it is still alive after 2 seconds.
+    appendThread.start();
+    TimeUnit.SECONDS.sleep(2);
+    assertTrue(appendThread.isAlive());
+    appendThread.interrupt();
+  }
+
+  private void verifyAppendRequests(long appendCount) {
+    assertEquals(appendCount, testBigQueryWrite.getAppendRequests().size());
+    for (int i = 0; i < appendCount; i++) {
+      AppendRowsRequest serverRequest = testBigQueryWrite.getAppendRequests().get(i);
+      assertTrue(serverRequest.getProtoRows().getRows().getSerializedRowsCount() > 0);
+      assertEquals(i, serverRequest.getOffset().getValue());
+      if (i == 0) {
+        // The first request received by the server should have the schema and stream name.
+        assertTrue(serverRequest.getProtoRows().hasWriterSchema());
+        assertEquals(TEST_STREAM_1, serverRequest.getWriteStream());
+        assertEquals("java-streamwriter " + TEST_TRACE_ID, serverRequest.getTraceId());
+      } else {
+        // Following requests should not have the schema and stream name.
+        assertFalse(serverRequest.getProtoRows().hasWriterSchema());
+        assertEquals("", serverRequest.getWriteStream());
+        assertEquals("", serverRequest.getTraceId());
+      }
+    }
+  }
+
+  @Test
+  void testBuildBigQueryWriteClientInWriter() throws Exception {
+    StreamWriter writer =
+        StreamWriter.newBuilder(TEST_STREAM_1)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .setChannelProvider(serviceHelper.createChannelProvider())
+            .setWriterSchema(createProtoSchema())
+            .build();
+
+    testBigQueryWrite.addResponse(createAppendResponse(0));
+    ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer, new String[] {"A"});
+    assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue());
+    writer.close();
+  }
+
+  /* DummyResponseSupplierWillFailThenSucceed is used to mock repeated failures, such as retriable
+   * in-stream errors. This Supplier will fail up to totalFailCount times with status failStatus.
+   * Once totalFailCount is reached, the provided Response is returned instead.
+   */
+  private static class DummyResponseSupplierWillFailThenSucceed
+      implements Supplier<FakeBigQueryWriteImpl.Response> {
+
+    private final int totalFailCount;
+    private int failCount;
+    private final com.google.rpc.Status failStatus;
+    private final FakeBigQueryWriteImpl.Response response;
+
+    DummyResponseSupplierWillFailThenSucceed(
+        FakeBigQueryWriteImpl.Response response,
+        int totalFailCount,
+        com.google.rpc.Status failStatus) {
+      this.totalFailCount = totalFailCount;
+      this.response = response;
+      this.failStatus = failStatus;
+      this.failCount = 0;
+    }
+
+    @Override
+    public FakeBigQueryWriteImpl.Response get() {
+      if (failCount >= totalFailCount) {
+        return response;
+      }
+      failCount++;
+      return new FakeBigQueryWriteImpl.Response(
+          AppendRowsResponse.newBuilder().setError(this.failStatus).build());
+    }
+  }
+
+  @Test
+  void testAppendSuccess() throws Exception {
+    StreamWriter writer = getTestStreamWriter();
+
+    long appendCount = 100;
+    for (int i = 0; i < appendCount; i++) {
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    for (int i = 0; i < appendCount; i++) {
+      futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+    }
+
+    for (int i = 0; i < appendCount; i++) {
+      assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue());
+    }
+
+    verifyAppendRequests(appendCount);
+
+    writer.close();
+  }
+
+  @Test
+  void testAppendSuccess_RetryDirectlyInCallback() throws Exception {
+    // Set a relatively small in-flight request limit.
+    StreamWriter writer =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setWriterSchema(createProtoSchema())
+            .setTraceId(TEST_TRACE_ID)
+            .setMaxRetryDuration(java.time.Duration.ofSeconds(5))
+            .setMaxInflightRequests(5)
+            .build();
+
+    // Fail the first request; in its callback we will insert another 10 requests. Those
+    // requests cannot be processed until the previous request's callback has finished.
+    long appendCount = 20;
+    for (int i = 0; i < appendCount; i++) {
+      if (i == 0) {
+        testBigQueryWrite.addResponse(
+            createAppendResponseWithError(Status.INVALID_ARGUMENT.getCode(), "test message"));
+      }
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+
+    // We will trigger 10 more requests from the callback of the following append.
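+    // A sketch of the pattern exercised below (AppendCompleteCallback is defined later in this
+    // class): the failed append's onFailure handler re-appends inline, because
+    // MoreExecutors.directExecutor() runs callbacks on the response thread:
+    //
+    //   ApiFutures.addCallback(
+    //       writer.append(rows), new AppendCompleteCallback(writer, rows),
+    //       MoreExecutors.directExecutor());
+    //
+    // With setMaxInflightRequests(5) above, those re-appends have to wait for in-flight slots,
+    // which is the path this sanity check exercises.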
+    ProtoRows protoRows = createProtoRows(new String[] {String.valueOf(-1)});
+    ApiFuture<AppendRowsResponse> future = writer.append(protoRows, -1);
+    ApiFutures.addCallback(
+        future, new AppendCompleteCallback(writer, protoRows), MoreExecutors.directExecutor());
+
+    StatusRuntimeException actualError =
+        assertFutureException(StatusRuntimeException.class, future);
+
+    Sleeper.DEFAULT.sleep(1000);
+    writer.close();
+  }
+
+  static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+    private final StreamWriter mainStreamWriter;
+    private final ProtoRows protoRows;
+    private int retryCount = 0;
+
+    public AppendCompleteCallback(StreamWriter mainStreamWriter, ProtoRows protoRows) {
+      this.mainStreamWriter = mainStreamWriter;
+      this.protoRows = protoRows;
+    }
+
+    @Override
+    public void onSuccess(AppendRowsResponse response) {
+      // Do nothing.
+    }
+
+    @Override
+    public void onFailure(Throwable throwable) {
+      for (int i = 0; i < 10; i++) {
+        this.mainStreamWriter.append(protoRows);
+      }
+    }
+  }
+
+  @Test
+  void testUpdatedSchemaFetch_multiplexing() throws Exception {
+    testUpdatedSchemaFetch(/* enableMultiplexing= */ true);
+  }
+
+  @Test
+  void testUpdatedSchemaFetch_nonMultiplexing() throws Exception {
+    testUpdatedSchemaFetch(/* enableMultiplexing= */ false);
+  }
+
+  private void testUpdatedSchemaFetch(boolean enableMultiplexing)
+      throws IOException, ExecutionException, InterruptedException {
+    StreamWriter writer =
+        StreamWriter.newBuilder(TEST_STREAM_1)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .setChannelProvider(serviceHelper.createChannelProvider())
+            .setWriterSchema(PROTO_SCHEMA)
+            .setEnableConnectionPool(enableMultiplexing)
+            .setLocation("us")
+            .build();
+    testBigQueryWrite.addResponse(
+        AppendRowsResponse.newBuilder()
+            .setAppendResult(
+                AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build())
+            .setUpdatedSchema(UPDATED_TABLE_SCHEMA)
+            .setWriteStream(TEST_STREAM_1)
+            .build());
+
+    assertEquals(writer.getUpdatedSchema(), null);
+    AppendRowsResponse response =
+        writer.append(createProtoRows(new String[] {String.valueOf(0)}), 0).get();
+    assertEquals(writer.getUpdatedSchema(), UPDATED_TABLE_SCHEMA);
+
+    // Create another writer. Although it has the same stream name, its creation timestamp is
+    // newer, so the old updated schema won't be returned.
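+    // The assertion below suggests the cached updated schema is tracked per stream with a
+    // creation-timestamp check: a writer built after the schema update starts with
+    // getUpdatedSchema() == null instead of inheriting the earlier writer's update.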
+ StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_1) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setChannelProvider(serviceHelper.createChannelProvider()) + .setWriterSchema(PROTO_SCHEMA) + .setEnableConnectionPool(enableMultiplexing) + .setLocation("us") + .build(); + assertEquals(writer2.getUpdatedSchema(), null); + } + + @Test + void testNoSchema() throws Exception { + StatusRuntimeException ex = + assertThrows( + StatusRuntimeException.class, + () -> { + StreamWriter.newBuilder(TEST_STREAM_1, client).build(); + }); + assertEquals(ex.getStatus().getCode(), Status.INVALID_ARGUMENT.getCode()); + assertTrue(ex.getStatus().getDescription().contains("Writer schema must be provided")); + } + + @Test + void testInvalidTraceId() throws Exception { + assertThrows( + IllegalArgumentException.class, + () -> { + StreamWriter.newBuilder(TEST_STREAM_1).setTraceId("abc"); + }); + assertThrows( + IllegalArgumentException.class, + () -> { + StreamWriter.newBuilder(TEST_STREAM_1).setTraceId("abc:"); + }); + assertThrows( + IllegalArgumentException.class, + () -> { + StreamWriter.newBuilder(TEST_STREAM_1).setTraceId(":abc"); + }); + } + + @Test + void testEnableConnectionPoolOnExplicitStream() throws Exception { + IllegalArgumentException ex = + assertThrows( + IllegalArgumentException.class, + () -> { + StreamWriter.newBuilder(EXPLICIT_STREAM, client) + .setEnableConnectionPool(true) + .build(); + }); + assertTrue(ex.getMessage().contains("Trying to enable connection pool in non-default stream.")); + } + + @Test + void testShortenStreamNameAllowed() throws Exception { + // no exception is thrown. + StreamWriter.newBuilder(TEST_STREAM_SHORTEN, client) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + } + + @Test + void testAppendSuccessAndConnectionError() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setTraceId(TEST_TRACE_ID) + // Retry expire immediately. 
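+            // (A 1 ms maxRetryDuration means the INTERNAL connection errors queued below are
+            // surfaced to the second append instead of being retried.)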
+ .setMaxRetryDuration(java.time.Duration.ofMillis(1L)) + .build(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addException(Status.INTERNAL.asException()); + testBigQueryWrite.addException(Status.INTERNAL.asException()); + testBigQueryWrite.addException(Status.INTERNAL.asException()); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + ApiException actualError = assertFutureException(ApiException.class, appendFuture2); + assertEquals(Code.INTERNAL, actualError.getStatusCode().getCode()); + + writer.close(); + } + + @Test + void testAppendSuccessAndInStreamError() throws Exception { + StreamWriter writer = getTestStreamWriter(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse( + createAppendResponseWithError(Status.INVALID_ARGUMENT.getCode(), "test message")); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); + ApiFuture appendFuture3 = sendTestMessage(writer, new String[] {"C"}); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + StatusRuntimeException actualError = + assertFutureException(StatusRuntimeException.class, appendFuture2); + assertEquals(Status.Code.INVALID_ARGUMENT, actualError.getStatus().getCode()); + assertEquals("test message", actualError.getStatus().getDescription()); + assertEquals(1, appendFuture3.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendFailedSchemaError() throws Exception { + StreamWriter writer = getTestStreamWriter(); + + StorageError storageError = + StorageError.newBuilder() + .setCode(StorageErrorCode.SCHEMA_MISMATCH_EXTRA_FIELDS) + .setEntity("foobar") + .build(); + com.google.rpc.Status statusProto = + com.google.rpc.Status.newBuilder() + .setCode(Code.INVALID_ARGUMENT.ordinal()) + .addDetails(Any.pack(storageError)) + .build(); + + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setError(statusProto).build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); + ApiFuture appendFuture3 = sendTestMessage(writer, new String[] {"C"}); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + Exceptions.SchemaMismatchedException actualError = + assertFutureException(Exceptions.SchemaMismatchedException.class, appendFuture2); + assertEquals("foobar", actualError.getStreamName()); + assertEquals(1, appendFuture3.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendFailRandomException() throws Exception { + StreamWriter writer = getTestStreamWriter(); + // Trigger a non-StatusRuntimeException for append operation (although grpc API should not + // return anything other than StatusRuntimeException) + IllegalArgumentException illegalArgumentException = + new IllegalArgumentException("Illegal argument"); + testBigQueryWrite.addException(illegalArgumentException); + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + UnknownException actualError = 
assertFutureException(UnknownException.class, appendFuture1); + assertEquals(Code.UNKNOWN, actualError.getStatusCode().getCode()); + + writer.close(); + } + + @Test + void longIdleBetweenAppends() throws Exception { + StreamWriter writer = getTestStreamWriter(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + + // Sleep to create a long idle between appends. + TimeUnit.SECONDS.sleep(3); + + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendAfterUserClose() throws Exception { + StreamWriter writer = getTestStreamWriter(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + writer.close(); + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertTrue(appendFuture2.isDone()); + StatusRuntimeException actualError = + assertFutureException(StatusRuntimeException.class, appendFuture2); + assertEquals(Status.Code.FAILED_PRECONDITION, actualError.getStatus().getCode()); + } + + @Test + void testAppendAfterServerClose() throws Exception { + StreamWriter writer = getTestStreamWriter(); + testBigQueryWrite.addException(Status.INVALID_ARGUMENT.asException()); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + ApiException error1 = assertFutureException(ApiException.class, appendFuture1); + assertEquals(Code.INVALID_ARGUMENT, error1.getStatusCode().getCode()); + + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); + assertTrue(appendFuture2.isDone()); + StatusRuntimeException error2 = + assertFutureException(StatusRuntimeException.class, appendFuture2); + assertEquals(Status.Code.FAILED_PRECONDITION, error2.getStatus().getCode()); + + writer.close(); + } + + @Test + void userCloseWhileRequestInflight() throws Exception { + final StreamWriter writer = getTestStreamWriter(); + // Server will sleep 2 seconds before sending back the response. + testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(2)); + testBigQueryWrite.addResponse(createAppendResponse(0)); + + // Send a request and close the stream in separate thread while the request is inflight. + final ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + Thread closeThread = + new Thread( + new Runnable() { + @Override + public void run() { + writer.close(); + } + }); + closeThread.start(); + + // Due to the sleep on server, the append won't finish within 1 second even though stream + // is being closed. + assertThrows( + TimeoutException.class, + () -> { + appendFuture1.get(1, TimeUnit.SECONDS); + }); + + // Within 2 seconds, the request should be done and stream should be closed. + closeThread.join(2000); + assertTrue(appendFuture1.isDone()); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + } + + @Test + void serverCloseWhileRequestsInflight() throws Exception { + StreamWriter writer = getTestStreamWriter(); + // Server will sleep 2 seconds before closing the connection. 
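+    // Expected split: only the append that hits the INVALID_ARGUMENT error surfaces an
+    // ApiException; the remaining nine in-flight requests fail with
+    // StreamWriterClosedException once the connection is torn down (asserted below).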
+ testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(2)); + testBigQueryWrite.addException(Status.INVALID_ARGUMENT.asException()); + + // Send 10 requests, so that there are 10 inflight requests. + int appendCount = 10; + List> futures = new ArrayList<>(); + for (int i = 0; i < appendCount; i++) { + futures.add(sendTestMessage(writer, new String[] {String.valueOf(i)})); + } + + // Server close should properly handle all inflight requests. + for (int i = 0; i < appendCount; i++) { + if (i == 0) { + ApiException actualError = assertFutureException(ApiException.class, futures.get(i)); + assertEquals(Code.INVALID_ARGUMENT, actualError.getStatusCode().getCode()); + } else { + assertFutureException(StreamWriterClosedException.class, futures.get(i)); + } + } + + writer.close(); + } + + @Test + void testZeroMaxInflightRequests() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightRequests(0) + .build(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + verifyAppendIsBlocked(writer); + writer.close(); + } + + @Test + void testZeroMaxInflightBytes() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightBytes(0) + .build(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + verifyAppendIsBlocked(writer); + writer.close(); + } + + @Test + void testOneMaxInflightRequests() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightRequests(1) + .build(); + // Server will sleep 1 second before every response. + testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1)); + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + assertTrue(writer.getInflightWaitSeconds() >= 1); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + writer.close(); + } + + @Test + void testOneMaxInflightRequests_MultiplexingCase() throws Exception { + ConnectionWorkerPool.setOptions(Settings.builder().setMaxConnectionsPerRegion(2).build()); + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightRequests(1) + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .setLocation("US") + .build(); + + // Server will sleep 1 second before every response. 
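+    // Each writer has its own in-flight limit of 1, so with the 1-second server delay both
+    // writers should report getInflightWaitSeconds() >= 1 even though they share a multiplexed
+    // connection pool.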
+    testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1));
+    testBigQueryWrite.addResponse(createAppendResponse(0));
+    testBigQueryWrite.addResponse(createAppendResponse(1));
+
+    ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer1, new String[] {"A"});
+    ApiFuture<AppendRowsResponse> appendFuture2 = sendTestMessage(writer2, new String[] {"A"});
+
+    assertTrue(writer1.getInflightWaitSeconds() >= 1);
+    assertTrue(writer2.getInflightWaitSeconds() >= 1);
+    assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue());
+    assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue());
+    writer1.close();
+    writer2.close();
+  }
+
+  @Test
+  void testOpenTelemetryAttributes_MultiplexingCase() throws Exception {
+    ConnectionWorkerPool.setOptions(
+        Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build());
+    StreamWriter writer1 =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setWriterSchema(createProtoSchema())
+            .setLocation("US")
+            .setEnableConnectionPool(true)
+            .setEnableOpenTelemetry(true)
+            .build();
+    StreamWriter writer2 =
+        StreamWriter.newBuilder(TEST_STREAM_2, client)
+            .setWriterSchema(createProtoSchema())
+            .setLocation("US")
+            .setEnableConnectionPool(true)
+            .setEnableOpenTelemetry(true)
+            .build();
+
+    testBigQueryWrite.addResponse(createAppendResponse(0));
+    testBigQueryWrite.addResponse(createAppendResponse(1));
+
+    ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer1, new String[] {"A"});
+    assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue());
+    Attributes attributes = writer1.getTelemetryAttributes();
+    String attributesTableId = attributes.get(TelemetryMetrics.telemetryKeyTableId);
+    assertEquals("projects/p/datasets/d1/tables/t1", attributesTableId);
+
+    ApiFuture<AppendRowsResponse> appendFuture2 = sendTestMessage(writer2, new String[] {"A"});
+    assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue());
+    attributes = writer2.getTelemetryAttributes();
+    attributesTableId = attributes.get(TelemetryMetrics.telemetryKeyTableId);
+    assertEquals("projects/p/datasets/d2/tables/t2", attributesTableId);
+
+    writer1.close();
+    writer2.close();
+  }
+
+  @Test
+  void testProtoSchemaPiping_nonMultiplexingCase() throws Exception {
+    ProtoSchema protoSchema = createProtoSchema();
+    StreamWriter writer =
+        StreamWriter.newBuilder(TEST_STREAM_1, client)
+            .setWriterSchema(protoSchema)
+            .setMaxInflightBytes(1)
+            .build();
+    long appendCount = 5;
+    for (int i = 0; i < appendCount; i++) {
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    for (int i = 0; i < appendCount; i++) {
+      futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+    }
+
+    for (int i = 0; i < appendCount; i++) {
+      assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue());
+    }
+    assertEquals(appendCount, testBigQueryWrite.getAppendRequests().size());
+    for (int i = 0; i < appendCount; i++) {
+      AppendRowsRequest appendRowsRequest = testBigQueryWrite.getAppendRequests().get(i);
+      assertEquals(i, appendRowsRequest.getOffset().getValue());
+      if (i == 0) {
+        assertEquals(protoSchema, appendRowsRequest.getProtoRows().getWriterSchema());
+        assertEquals(appendRowsRequest.getWriteStream(), TEST_STREAM_1);
+      } else {
+        assertEquals(
+            ProtoSchema.getDefaultInstance(), appendRowsRequest.getProtoRows().getWriterSchema());
+      }
+    }
+    writer.close();
+  }
+
+  @Test
+  void testProtoSchemaPiping_multiplexingCase() throws Exception {
+    // Use the shared connection mode.
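+    // With min = max = 1 connection per region, both writers below multiplex over a single
+    // connection. The loop assertions then pin down the wire behavior: the first request for
+    // each stream carries that stream's writer schema, and subsequent requests for the same
+    // stream send the default (empty) schema while keeping the stream name populated.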
+ ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + ProtoSchema schema1 = createProtoSchema("Schema1"); + ProtoSchema schema2 = createProtoSchema("Schema2"); + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(schema1) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2, client) + .setWriterSchema(schema2) + .setMaxInflightRequests(1) + .setEnableConnectionPool(true) + .setLocation("US") + .build(); + + long appendCountPerStream = 5; + for (int i = 0; i < appendCountPerStream * 4; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + + List> futures = new ArrayList<>(); + // In total insert append `appendCountPerStream` * 4 requests. + // We insert using the pattern of streamWriter1, streamWriter1, streamWriter2, streamWriter2 + for (int i = 0; i < appendCountPerStream; i++) { + futures.add(writer1.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4)); + futures.add(writer1.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4 + 1)); + futures.add(writer2.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4 + 2)); + futures.add(writer2.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4 + 3)); + } + + for (int i = 0; i < appendCountPerStream * 4; i++) { + AppendRowsRequest appendRowsRequest = testBigQueryWrite.getAppendRequests().get(i); + assertEquals(i, appendRowsRequest.getOffset().getValue()); + if (i % 4 == 0) { + assertEquals(appendRowsRequest.getProtoRows().getWriterSchema(), schema1); + assertEquals(appendRowsRequest.getWriteStream(), TEST_STREAM_1); + } else if (i % 4 == 1) { + assertEquals( + appendRowsRequest.getProtoRows().getWriterSchema(), ProtoSchema.getDefaultInstance()); + // Before entering multiplexing (i == 1) case, the write stream won't be populated. + assertEquals(appendRowsRequest.getWriteStream(), TEST_STREAM_1); + } else if (i % 4 == 2) { + assertEquals(appendRowsRequest.getProtoRows().getWriterSchema(), schema2); + assertEquals(appendRowsRequest.getWriteStream(), TEST_STREAM_2); + } else { + assertEquals( + appendRowsRequest.getProtoRows().getWriterSchema(), ProtoSchema.getDefaultInstance()); + assertEquals(appendRowsRequest.getWriteStream(), TEST_STREAM_2); + } + assertEquals( + appendRowsRequest.getDefaultMissingValueInterpretation(), + MissingValueInterpretation.MISSING_VALUE_INTERPRETATION_UNSPECIFIED); + } + + writer1.close(); + writer2.close(); + } + + @Test + void testMultiplexingWithDifferentStreamAndArrowSchema() throws Exception { + // Use the shared connection mode. 
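+    // The Arrow schemas below are serialized once into IPC bytes and wrapped in an ArrowSchema
+    // proto, mirroring the constructor of this test class:
+    //
+    //   MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), schema);
+    //   ArrowSchema.newBuilder().setSerializedSchema(ByteString.copyFrom(out.toByteArray()));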
+ ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + Field col1 = new Field("col1", FieldType.nullable(new ArrowType.Utf8()), null); + Schema arrowSchema1 = new Schema(Arrays.asList(col1)); + final ByteArrayOutputStream out1 = new ByteArrayOutputStream(); + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out1)), arrowSchema1); + ArrowSchema serializedArrowSchema1 = + ArrowSchema.newBuilder() + .setSerializedSchema(ByteString.copyFrom(out1.toByteArray())) + .build(); + + Field col2 = new Field("col2", FieldType.nullable(new ArrowType.Utf8()), null); + Schema arrowSchema2 = new Schema(Arrays.asList(col2)); + final ByteArrayOutputStream out2 = new ByteArrayOutputStream(); + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out2)), arrowSchema2); + ArrowSchema serializedArrowSchema2 = + ArrowSchema.newBuilder() + .setSerializedSchema(ByteString.copyFrom(out2.toByteArray())) + .build(); + + try (StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(serializedArrowSchema1) + .setLocation("US") + .setEnableConnectionPool(true) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2, client) + .setWriterSchema(serializedArrowSchema2) + .setEnableConnectionPool(true) + .setLocation("US") + .build(); ) { + + // Verify they are sharing the same connection pool. + assertEquals( + writer1.getTestOnlyConnectionWorkerPool(), writer2.getTestOnlyConnectionWorkerPool()); + + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch batch1 = + createArrowRecordBatch(arrowSchema1, allocator, new String[] {"val1"}); + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch batch2 = + createArrowRecordBatch(arrowSchema2, allocator, new String[] {"val2"}); + + ApiFuture future1 = writer1.append(batch1, 0L); + ApiFuture future2 = writer2.append(batch2, 1L); + + assertEquals(0, future1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, future2.get().getAppendResult().getOffset().getValue()); + + assertEquals(2, testBigQueryWrite.getAppendRequests().size()); + + // Verify both requests sent to backend contain stream and schema. + AppendRowsRequest request1 = testBigQueryWrite.getAppendRequests().get(0); + assertEquals(TEST_STREAM_1, request1.getWriteStream()); + assertEquals(serializedArrowSchema1, request1.getArrowRows().getWriterSchema()); + assertEquals( + batch1.getSerializedRecordBatch(), + request1.getArrowRows().getRows().getSerializedRecordBatch()); + + AppendRowsRequest request2 = testBigQueryWrite.getAppendRequests().get(1); + assertEquals(TEST_STREAM_2, request2.getWriteStream()); + assertEquals(serializedArrowSchema2, request2.getArrowRows().getWriterSchema()); + assertEquals( + batch2.getSerializedRecordBatch(), + request2.getArrowRows().getRows().getSerializedRecordBatch()); + } + } + + @Test + void testFixedCredentialProvider_nullProvider() throws Exception { + // Use the shared connection mode. 
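+    // The three credential tests below suggest the connection pool map is keyed by the resolved
+    // credentials: null or equal credentials share one pool entry, while two distinct
+    // UserCredentials produce two entries in getTestOnlyConnectionPoolMap().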
+ ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + ProtoSchema schema1 = createProtoSchema("Schema1"); + ProtoSchema schema2 = createProtoSchema("Schema2"); + CredentialsProvider credentialsProvider1 = FixedCredentialsProvider.create(null); + CredentialsProvider credentialsProvider2 = FixedCredentialsProvider.create(null); + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(schema1) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .setCredentialsProvider(credentialsProvider1) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2, client) + .setWriterSchema(schema2) + .setMaxInflightRequests(1) + .setEnableConnectionPool(true) + .setCredentialsProvider(credentialsProvider2) + .setLocation("US") + .build(); + // Null credential provided belong to the same connection pool. + assertEquals(writer1.getTestOnlyConnectionPoolMap().size(), 1); + } + + @Test + void testFixedCredentialProvider_twoCredentialsSplitPool() throws Exception { + // Use the shared connection mode. + ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + ProtoSchema schema1 = createProtoSchema("Schema1"); + ProtoSchema schema2 = createProtoSchema("Schema2"); + UserCredentials userCredentials1 = + UserCredentials.newBuilder() + .setClientId("CLIENT_ID_1") + .setClientSecret("CLIENT_SECRET_1") + .setRefreshToken("REFRESH_TOKEN_1") + .build(); + CredentialsProvider credentialsProvider1 = FixedCredentialsProvider.create(userCredentials1); + UserCredentials userCredentials2 = + UserCredentials.newBuilder() + .setClientId("CLIENT_ID_2") + .setClientSecret("CLIENT_SECRET_2") + .setRefreshToken("REFRESH_TOKEN_2") + .build(); + CredentialsProvider credentialsProvider2 = FixedCredentialsProvider.create(userCredentials2); + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1) + .setWriterSchema(schema1) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .setCredentialsProvider(credentialsProvider1) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2) + .setWriterSchema(schema2) + .setMaxInflightRequests(1) + .setEnableConnectionPool(true) + .setLocation("US") + .setCredentialsProvider(credentialsProvider2) + .build(); + assertEquals(writer1.getTestOnlyConnectionPoolMap().size(), 2); + } + + @Test + void testFixedCredentialProvider_twoProviderSameCredentialSharePool() throws Exception { + // Use the shared connection mode. 
+ ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + ProtoSchema schema1 = createProtoSchema("Schema1"); + ProtoSchema schema2 = createProtoSchema("Schema2"); + UserCredentials userCredentials = + UserCredentials.newBuilder() + .setClientId("CLIENT_ID_1") + .setClientSecret("CLIENT_SECRET_1") + .setRefreshToken("REFRESH_TOKEN_1") + .build(); + CredentialsProvider credentialsProvider1 = FixedCredentialsProvider.create(userCredentials); + CredentialsProvider credentialsProvider2 = FixedCredentialsProvider.create(userCredentials); + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1) + .setWriterSchema(schema1) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .setCredentialsProvider(credentialsProvider1) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2) + .setWriterSchema(schema2) + .setMaxInflightRequests(1) + .setEnableConnectionPool(true) + .setLocation("US") + .setCredentialsProvider(credentialsProvider2) + .build(); + assertEquals(writer1.getTestOnlyConnectionPoolMap().size(), 1); + } + + @Test + void testDefaultValueInterpretation_multiplexingCase() throws Exception { + // Use the shared connection mode. + ConnectionWorkerPool.setOptions( + Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); + ProtoSchema schema1 = createProtoSchema("Schema1"); + ProtoSchema schema2 = createProtoSchema("Schema2"); + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(schema1) + .setLocation("US") + .setEnableConnectionPool(true) + .setMaxInflightRequests(1) + .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE) + .build(); + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_2, client) + .setWriterSchema(schema2) + .setMaxInflightRequests(1) + .setEnableConnectionPool(true) + .setLocation("US") + .setDefaultMissingValueInterpretation(MissingValueInterpretation.NULL_VALUE) + .build(); + + long appendCountPerStream = 5; + for (int i = 0; i < appendCountPerStream * 4; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + + // In total insert append `appendCountPerStream` * 4 requests. 
+ // We insert using the pattern of streamWriter1, streamWriter1, streamWriter2, streamWriter2 + for (int i = 0; i < appendCountPerStream; i++) { + ApiFuture appendFuture1 = + writer1.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4); + ApiFuture appendFuture2 = + writer1.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4 + 1); + ApiFuture appendFuture3 = + writer2.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4 + 2); + ApiFuture appendFuture4 = + writer2.append(createProtoRows(new String[] {String.valueOf(i)}), i * 4 + 3); + appendFuture1.get(); + appendFuture2.get(); + appendFuture3.get(); + appendFuture4.get(); + } + + for (int i = 0; i < appendCountPerStream * 4; i++) { + AppendRowsRequest appendRowsRequest = testBigQueryWrite.getAppendRequests().get(i); + assertEquals(i, appendRowsRequest.getOffset().getValue()); + if (i % 4 <= 1) { + assertEquals( + appendRowsRequest.getDefaultMissingValueInterpretation(), + MissingValueInterpretation.DEFAULT_VALUE); + } else { + assertEquals( + appendRowsRequest.getDefaultMissingValueInterpretation(), + MissingValueInterpretation.NULL_VALUE); + } + } + + writer1.close(); + writer2.close(); + } + + @Test + void testAppendsWithTinyMaxInflightBytes() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightBytes(1) + .build(); + // Server will sleep 100ms before every response. + testBigQueryWrite.setResponseSleep(java.time.Duration.ofMillis(100)); + long appendCount = 10; + for (int i = 0; i < appendCount; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + + List> futures = new ArrayList<>(); + long appendStartTimeMs = System.currentTimeMillis(); + for (int i = 0; i < appendCount; i++) { + futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i)); + } + long appendElapsedMs = System.currentTimeMillis() - appendStartTimeMs; + assertTrue(appendElapsedMs >= 1000); + + for (int i = 0; i < appendCount; i++) { + assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue()); + } + assertEquals(appendCount, testBigQueryWrite.getAppendRequests().size()); + for (int i = 0; i < appendCount; i++) { + assertEquals(i, testBigQueryWrite.getAppendRequests().get(i).getOffset().getValue()); + } + writer.close(); + } + + @Test + void testAppendsWithTinyMaxInflightBytesThrow() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightBytes(1) + .setLimitExceededBehavior(FlowController.LimitExceededBehavior.ThrowException) + .build(); + Exceptions.InflightBytesLimitExceededException ex = + assertThrows( + Exceptions.InflightBytesLimitExceededException.class, + () -> { + writer.append(createProtoRows(new String[] {String.valueOf(10)}), -1); + }); + assertEquals(ex.getStatus().getCode(), Status.RESOURCE_EXHAUSTED.getCode()); + assertTrue( + ex.getStatus() + .getDescription() + .contains( + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections")); + + assertEquals(ex.getWriterId(), writer.getWriterId()); + assertEquals(1, ex.getCurrentLimit()); + writer.close(); + } + + @Test + void testLimitBehaviorIgnoreNotAccepted() throws Exception { + StatusRuntimeException ex = + assertThrows( + StatusRuntimeException.class, + () -> { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + 
.setMaxInflightBytes(1)
+                      .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Ignore)
+                      .build();
+            });
+    assertEquals(ex.getStatus().getCode(), Status.INVALID_ARGUMENT.getCode());
+    assertTrue(
+        ex.getStatus()
+            .getDescription()
+            .contains("LimitExceededBehavior.Ignore is not supported on StreamWriter."));
+  }
+
+  @Test
+  void testMessageTooLarge() throws Exception {
+    StreamWriter writer = getTestStreamWriter();
+
+    // There is an opportunity to allow 20 MB requests.
+    String oversized =
+        Strings.repeat("a", (int) (ConnectionWorker.getApiMaxRequestBytes() * 2 + 1));
+    ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer, new String[] {oversized});
+    assertTrue(appendFuture1.isDone());
+    StatusRuntimeException actualError =
+        assertFutureException(StatusRuntimeException.class, appendFuture1);
+    assertEquals(Status.Code.INVALID_ARGUMENT, actualError.getStatus().getCode());
+    assertTrue(actualError.getStatus().getDescription().contains("MessageSize is too large"));
+
+    writer.close();
+  }
+
+  @Test
+  void testWrongCompressionType() throws Exception {
+    IllegalArgumentException ex =
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> {
+              StreamWriter.newBuilder(TEST_STREAM_1, client).setCompressorName("not-gzip").build();
+            });
+    assertTrue(
+        ex.getMessage()
+            .contains(
+                "Compression of type \"not-gzip\" isn't supported, only \"gzip\" compression is"
+                    + " supported."));
+  }
+
+  @Test
+  void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws Exception {
+    ProtoSchema schema1 = createProtoSchema("foo");
+    StreamWriter.setMaxRequestCallbackWaitTime(java.time.Duration.ofSeconds(1));
+    StreamWriter writer =
+        StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(schema1).build();
+    testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(3));
+
+    long appendCount = 10;
+    for (int i = 0; i < appendCount; i++) {
+      testBigQueryWrite.addResponse(createAppendResponse(i));
+    }
+
+    // In total, insert 10 requests.
+    List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+    for (int i = 0; i < appendCount; i++) {
+      futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+    }
+
+    for (int i = 0; i < appendCount; i++) {
+      int finalI = i;
+      ExecutionException ex =
+          assertThrows(
+              ExecutionException.class,
+              () -> futures.get(finalI).get().getAppendResult().getOffset().getValue());
+      if (i == 0) {
+        assertThat(ex.getCause()).hasMessageThat().contains("Request has waited in inflight queue");
+        assertThat(ex.getCause())
+            .isInstanceOf(Exceptions.MaximumRequestCallbackWaitTimeExceededException.class);
+      } else {
+        assertThat(ex.getCause())
+            .hasMessageThat()
+            .contains("Connection is aborted due to an unrecoverable");
+      }
+    }
+  }
+
+  @Test
+  void testAppendWithResetSuccess() throws Exception {
+    try (StreamWriter writer = getTestStreamWriter()) {
+      testBigQueryWrite.setCloseEveryNAppends(113);
+      long appendCount = 10000;
+      for (long i = 0; i < appendCount; i++) {
+        testBigQueryWrite.addResponse(createAppendResponse(i));
+      }
+      List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+      for (long i = 0; i < appendCount; i++) {
+        futures.add(sendTestMessage(writer, new String[] {String.valueOf(i)}, i));
+      }
+      for (int i = 0; i < appendCount; i++) {
+        assertEquals(futures.get(i).get().getAppendResult().getOffset().getValue(), (long) i);
+      }
+      assertTrue(testBigQueryWrite.getConnectionCount() >= (int) (appendCount / 113.0));
+    }
+  }
+
+  @Test
+  void testAppendWithResetNeverSuccess() throws Exception {
+    try (StreamWriter writer = getTestStreamWriter()) {
+
testBigQueryWrite.setCloseForeverAfter(1); + long appendCount = 100; + for (long i = 0; i < appendCount; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + List> futures = new ArrayList<>(); + for (long i = 0; i < appendCount; i++) { + futures.add(sendTestMessage(writer, new String[] {String.valueOf(i)}, i)); + } + // first request succeeded. + assertEquals(futures.get(0).get().getAppendResult().getOffset().getValue(), 0); + // after 5 seconds, the requests will bail out. + for (int i = 1; i < appendCount; i++) { + if (i == 1) { + assertFutureException(AbortedException.class, futures.get(i)); + } else { + assertFutureException(StreamWriterClosedException.class, futures.get(i)); + } + } + } + } + + @Test + void testAppendWithResetNeverSuccessWithMultiplexing() throws Exception { + try (StreamWriter writer = getMultiplexingTestStreamWriter()) { + testBigQueryWrite.setCloseForeverAfter(1); + long appendCount = 100; + for (long i = 0; i < appendCount; i++) { + testBigQueryWrite.addResponse(createAppendResponse(i)); + } + List> futures = new ArrayList<>(); + for (long i = 0; i < appendCount; i++) { + futures.add(sendTestMessage(writer, new String[] {String.valueOf(i)}, i)); + } + // first request succeeded. + assertEquals(futures.get(0).get().getAppendResult().getOffset().getValue(), 0); + // after 5 seconds, the requests will bail out. + for (int i = 1; i < appendCount; i++) { + if (i == 1) { + assertFutureException(AbortedException.class, futures.get(i)); + } else { + assertFutureException(StreamWriterClosedException.class, futures.get(i)); + } + } + } + } + + // This test is setup for the server to force a retry after all records are sent. Ensure the + // records are resent, even if no new records are appeneded. + @Test + void testRetryAfterAllRecordsInflight() throws Exception { + try (StreamWriter writer = getTestStreamWriter()) { + testBigQueryWrite.setCloseEveryNAppends(2); + testBigQueryWrite.setTimesToClose(1); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}, 0); + ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}, 1); + TimeUnit.SECONDS.sleep(1); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + } + } + + @Test + void testWriterClosedStream() throws Exception { + try (StreamWriter writer = getTestStreamWriter()) { + // Writer is closed without any traffic. + TimeUnit.SECONDS.sleep(1); + } + } + + @Test + void testWriterAlreadyClosedException() throws Exception { + StreamWriter writer = getTestStreamWriter(); + writer.close(); + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}, 0); + Exceptions.StreamWriterClosedException actualError = + assertFutureException(Exceptions.StreamWriterClosedException.class, appendFuture1); + // The basic StatusRuntimeException API is not changed. 
+    assertTrue(actualError instanceof StatusRuntimeException);
+    assertEquals(Status.Code.FAILED_PRECONDITION, actualError.getStatus().getCode());
+    assertTrue(actualError.getStatus().getDescription().contains("User closed StreamWriter"));
+    assertEquals(actualError.getWriterId(), writer.getWriterId());
+    assertEquals(actualError.getStreamName(), writer.getStreamName());
+  }
+
+  @Test
+  void testWriterClosedException() throws Exception {
+    StreamWriter writer = getTestStreamWriter();
+    testBigQueryWrite.addException(Status.INTERNAL.asException());
+    ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer, new String[] {"A"}, 0);
+    try {
+      appendFuture1.get();
+    } catch (Exception e) {
+      // Expected: the INTERNAL error fails this append and closes the connection.
+    }
+    ApiFuture<AppendRowsResponse> appendFuture2 = sendTestMessage(writer, new String[] {"A"}, 0);
+    Exceptions.StreamWriterClosedException actualError =
+        assertFutureException(Exceptions.StreamWriterClosedException.class, appendFuture2);
+    // The basic StatusRuntimeException API is not changed.
+    assertTrue(actualError instanceof StatusRuntimeException);
+    assertEquals(Status.Code.FAILED_PRECONDITION, actualError.getStatus().getCode());
+    assertTrue(actualError.getStatus().getDescription().contains("Connection is closed"));
+    assertEquals(actualError.getWriterId(), writer.getWriterId());
+    assertEquals(actualError.getStreamName(), writer.getStreamName());
+  }
+
+  @Test
+  void testWriterId()
+      throws Descriptors.DescriptorValidationException, IOException, InterruptedException {
+    StreamWriter writer1 = getTestStreamWriter();
+    assertFalse(writer1.getWriterId().isEmpty());
+    StreamWriter writer2 = getTestStreamWriter();
+    assertFalse(writer2.getWriterId().isEmpty());
+    assertNotEquals(writer1.getWriterId(), writer2.getWriterId());
+  }
+
+  @Test
+  void testInitialization_operationKind() throws Exception {
+    try (StreamWriter streamWriter = getMultiplexingTestStreamWriter()) {
+      assertEquals(streamWriter.getConnectionOperationType(), Kind.CONNECTION_WORKER_POOL);
+    }
+    try (StreamWriter streamWriter = getTestStreamWriter()) {
+      assertEquals(streamWriter.getConnectionOperationType(), Kind.CONNECTION_WORKER);
+    }
+  }
+
+  @Test
+  void testExtractDatasetName() throws Exception {
+    assertEquals(
+        StreamWriter.extractDatasetAndProjectName(
+            "projects/project1/datasets/dataset2/tables/something"),
+        "projects/project1/datasets/dataset2/");
+
+    IllegalStateException ex =
+        assertThrows(
+            IllegalStateException.class,
+            () -> {
+              StreamWriter.extractDatasetAndProjectName(
+                  "wrong/projects/project1/wrong/datasets/dataset2/tables/something");
+            });
+    assertTrue(ex.getMessage().contains("The passed in stream name does not match"));
+  }
+
+  @Test
+  void testRetryInUnrecoverableStatus_MultiplexingCase() throws Exception {
+    ConnectionWorkerPool.setOptions(
+        Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(4).build());
+    ConnectionWorkerPool.enableTestingLogic();
+
+    // Setup: create four stream writers, two of them writing to the same stream.
+    // All four should be assigned to the same connection.
+    // 1. Submit three requests first to trigger the connection retry limit.
+    // 2. At this point the connection enters an unrecoverable state.
+    // 3. Further requests submitted through these stream writers trigger connection reassignment.
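+    // The fake-server knobs used below drive this scenario: setCloseForeverAfter(2) makes the
+    // fake service close the connection permanently after the second append, so the third
+    // append (appendFuture3) fails and leaves the pool in the unrecoverable state described
+    // above.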
+    StreamWriter writer1 = getMultiplexingStreamWriter(TEST_STREAM_1);
+    StreamWriter writer2 = getMultiplexingStreamWriter(TEST_STREAM_2);
+    StreamWriter writer3 = getMultiplexingStreamWriter(TEST_STREAM_3);
+    StreamWriter writer4 = getMultiplexingStreamWriter(TEST_STREAM_3);
+
+    testBigQueryWrite.setCloseForeverAfter(2);
+    testBigQueryWrite.setTimesToClose(1);
+    testBigQueryWrite.addResponse(createAppendResponse(0));
+    testBigQueryWrite.addResponse(createAppendResponse(1));
+
+    // The connection will fail after the third append is triggered.
+    ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer1, new String[] {"A"}, 0);
+    ApiFuture<AppendRowsResponse> appendFuture2 = sendTestMessage(writer2, new String[] {"B"}, 1);
+    ApiFuture<AppendRowsResponse> appendFuture3 = sendTestMessage(writer3, new String[] {"C"}, 2);
+    TimeUnit.SECONDS.sleep(1);
+    assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue());
+    assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue());
+    assertThrows(
+        ExecutionException.class,
+        () -> {
+          assertEquals(2, appendFuture3.get().getAppendResult().getOffset().getValue());
+        });
+    assertEquals(writer1.getTestOnlyConnectionWorkerPool().getTotalConnectionCount(), 1);
+    assertEquals(writer1.getTestOnlyConnectionWorkerPool().getCreateConnectionCount(), 1);
+
+    // Submitting another request through a writer attached to the closed connection creates a
+    // new connection.
+
+    testBigQueryWrite.setCloseForeverAfter(0);
+    testBigQueryWrite.addResponse(createAppendResponse(4));
+    testBigQueryWrite.addResponse(createAppendResponse(5));
+    testBigQueryWrite.addResponse(createAppendResponse(6));
+    ApiFuture<AppendRowsResponse> appendFuture4 = sendTestMessage(writer4, new String[] {"A"}, 2);
+    ApiFuture<AppendRowsResponse> appendFuture5 = sendTestMessage(writer1, new String[] {"A"}, 3);
+    ApiFuture<AppendRowsResponse> appendFuture6 = sendTestMessage(writer2, new String[] {"B"}, 4);
+    assertEquals(4, appendFuture4.get().getAppendResult().getOffset().getValue());
+    assertEquals(5, appendFuture5.get().getAppendResult().getOffset().getValue());
+    assertEquals(6, appendFuture6.get().getAppendResult().getOffset().getValue());
+    assertEquals(writer1.getTestOnlyConnectionWorkerPool().getTotalConnectionCount(), 1);
+    assertEquals(writer1.getTestOnlyConnectionWorkerPool().getCreateConnectionCount(), 2);
+
+    writer1.close();
+    writer2.close();
+    writer3.close();
+    writer4.close();
+    assertEquals(writer1.getTestOnlyConnectionWorkerPool().getTotalConnectionCount(), 0);
+  }
+
+  @Test
+  void testCloseWhileInUnrecoverableState() throws Exception {
+    ConnectionWorkerPool.setOptions(
+        Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(4).build());
+    ConnectionWorkerPool.enableTestingLogic();
+
+    // Setup: create three stream writers.
+    // 1. Submit three requests at first to trigger the connection retry limit.
+    // 2. Submit a request to writer3 to trigger reassignment.
+    // 3. Closing the previous two writers should then succeed.
+    StreamWriter writer1 = getMultiplexingStreamWriter(TEST_STREAM_1);
+    StreamWriter writer2 = getMultiplexingStreamWriter(TEST_STREAM_2);
+    StreamWriter writer3 = getMultiplexingStreamWriter(TEST_STREAM_3);
+
+    testBigQueryWrite.setCloseForeverAfter(2);
+    testBigQueryWrite.setTimesToClose(1);
+    testBigQueryWrite.addResponse(createAppendResponse(0));
+    testBigQueryWrite.addResponse(createAppendResponse(1));
+
+    // The connection will fail after the third append is triggered.
+ ApiFuture appendFuture1 = sendTestMessage(writer1, new String[] {"A"}, 0); + ApiFuture appendFuture2 = sendTestMessage(writer2, new String[] {"B"}, 1); + ApiFuture appendFuture3 = sendTestMessage(writer3, new String[] {"C"}, 2); + TimeUnit.SECONDS.sleep(1); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertThrows( + ExecutionException.class, + () -> { + assertEquals(2, appendFuture3.get().getAppendResult().getOffset().getValue()); + }); + assertEquals(writer1.getTestOnlyConnectionWorkerPool().getTotalConnectionCount(), 1); + assertEquals(writer1.getTestOnlyConnectionWorkerPool().getCreateConnectionCount(), 1); + + writer1.close(); + writer2.close(); + // We will still be left with one request + assertEquals(writer1.getTestOnlyConnectionWorkerPool().getCreateConnectionCount(), 1); + } + + public StreamWriter getMultiplexingStreamWriter(String streamName) throws IOException { + return StreamWriter.newBuilder(streamName, client) + .setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .setMaxInflightRequests(10) + .setLocation("US") + .setMaxRetryDuration(java.time.Duration.ofMillis(100)) + .setRetrySettings(retrySettings) + .build(); + } + + // Timeout to ensure close() doesn't wait for done callback timeout. + @org.junit.jupiter.api.Timeout(10000) + void testCloseDisconnectedStream() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setChannelProvider(serviceHelper.createChannelProvider()) + .setWriterSchema(createProtoSchema()) + .build(); + + testBigQueryWrite.addResponse(createAppendResponse(0)); + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + serviceHelper.stop(); + // Ensure closing the writer after disconnect succeeds. 
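+    // If close() were to block waiting for done callbacks on the dead channel, the Timeout on
+    // this test would fail it rather than hang the suite.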
+ writer.close(); + } + + @Test + void testSetAndGetMissingValueInterpretationMap() throws Exception { + StreamWriter.Builder writerBuilder = getTestStreamWriterBuilder(); + Map missingValueMap = new HashMap(); + missingValueMap.put("col1", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE); + missingValueMap.put("col3", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE); + writerBuilder.setMissingValueInterpretationMap(missingValueMap); + StreamWriter writer = writerBuilder.build(); + assertEquals(missingValueMap, writer.getMissingValueInterpretationMap()); + } + + @Test + void testAppendWithoutMissingValueMap() throws Exception { + try (StreamWriter writer = getTestStreamWriter()) { + + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture responseFuture = + writer.append(createProtoRows(new String[] {String.valueOf(0)}), 0); + + assertEquals(0, responseFuture.get().getAppendResult().getOffset().getValue()); + + verifyAppendRequests(1); + assertTrue( + testBigQueryWrite.getAppendRequests().get(0).getMissingValueInterpretations().isEmpty()); + } + } + + @Test + void testAppendWithMissingValueMap() throws Exception { + Map missingValueMap = new HashMap(); + missingValueMap.put("col1", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE); + missingValueMap.put("col3", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE); + + try (StreamWriter writer = + getTestStreamWriterBuilder().setMissingValueInterpretationMap(missingValueMap).build()) { + + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture responseFuture = + writer.append(createProtoRows(new String[] {String.valueOf(0)}), 0); + + assertEquals(0, responseFuture.get().getAppendResult().getOffset().getValue()); + + verifyAppendRequests(1); + + assertEquals( + testBigQueryWrite.getAppendRequests().get(0).getMissingValueInterpretations(), + missingValueMap); + } + } + + @org.junit.jupiter.api.Timeout(10000) + void testStreamWriterUserCloseMultiplexing() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + + writer.close(); + assertTrue(writer.isClosed()); + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> { + appendFuture1.get(); + }); + assertEquals( + Status.Code.FAILED_PRECONDITION, + ((StatusRuntimeException) ex.getCause()).getStatus().getCode()); + assertTrue(writer.isUserClosed()); + } + + @org.junit.jupiter.api.Timeout(10000) + void testStreamWriterUserCloseNoMultiplexing() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(createProtoSchema()).build(); + + writer.close(); + assertTrue(writer.isClosed()); + ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> { + appendFuture1.get(); + }); + assertEquals( + Status.Code.FAILED_PRECONDITION, + ((StatusRuntimeException) ex.getCause()).getStatus().getCode()); + assertTrue(writer.isUserClosed()); + } + + @org.junit.jupiter.api.Timeout(10000) + void testStreamWriterPermanentErrorMultiplexing() throws Exception { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .setLocation("us") + .build(); + testBigQueryWrite.setCloseForeverAfter(1); + // 
+ // Permanent error.
+ testBigQueryWrite.setFailedStatus(Status.INVALID_ARGUMENT);
+ testBigQueryWrite.addResponse(createAppendResponse(0));
+ ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer, new String[] {"A"});
+ appendFuture1.get();
+ ApiFuture<AppendRowsResponse> appendFuture2 = sendTestMessage(writer, new String[] {"A"});
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ appendFuture2.get();
+ });
+ assertTrue(ex.getCause() instanceof InvalidArgumentException);
+ assertFalse(writer.isClosed());
+ assertFalse(writer.isUserClosed());
+ }
+
+ @Test
+ @org.junit.jupiter.api.Timeout(10000)
+ void testStreamWriterPermanentErrorNoMultiplexing() throws Exception {
+ StreamWriter writer =
+ StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(createProtoSchema()).build();
+ testBigQueryWrite.setCloseForeverAfter(1);
+ // Permanent error.
+ testBigQueryWrite.setFailedStatus(Status.INVALID_ARGUMENT);
+ testBigQueryWrite.addResponse(createAppendResponse(0));
+ ApiFuture<AppendRowsResponse> appendFuture1 = sendTestMessage(writer, new String[] {"A"});
+ appendFuture1.get();
+ ApiFuture<AppendRowsResponse> appendFuture2 = sendTestMessage(writer, new String[] {"A"});
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ appendFuture2.get();
+ });
+ assertTrue(writer.isClosed());
+ assertTrue(ex.getCause() instanceof InvalidArgumentException);
+ assertFalse(writer.isUserClosed());
+ }
+
+ @Test
+ @org.junit.jupiter.api.Timeout(10000)
+ void testBuilderDefaultSetting() throws Exception {
+ StreamWriter.Builder writerBuilder = StreamWriter.newBuilder(TEST_STREAM_1);
+ BigQueryWriteSettings writeSettings = StreamWriter.getBigQueryWriteSettings(writerBuilder);
+ assertEquals(
+ BigQueryWriteSettings.defaultExecutorProviderBuilder().build().toString(),
+ writeSettings.getBackgroundExecutorProvider().toString());
+ assertEquals(
+ BigQueryWriteSettings.defaultCredentialsProviderBuilder().build().toString(),
+ writeSettings.getCredentialsProvider().toString());
+ assertTrue(
+ writeSettings.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider);
+ assertTrue(
+ ((InstantiatingGrpcChannelProvider) writeSettings.getTransportChannelProvider())
+ .getKeepAliveWithoutCalls());
+ assertEquals(
+ ((InstantiatingGrpcChannelProvider) writeSettings.getTransportChannelProvider())
+ .getKeepAliveTimeoutDuration(),
+ java.time.Duration.ofMinutes(1));
+ assertEquals(
+ ((InstantiatingGrpcChannelProvider) writeSettings.getTransportChannelProvider())
+ .getKeepAliveTimeDuration(),
+ java.time.Duration.ofMinutes(1));
+ assertEquals(
+ BigQueryWriteSettings.getDefaultEndpoint(), writeSettings.getEndpoint().toString());
+ }
+
+ @Test
+ @org.junit.jupiter.api.Timeout(10000)
+ void testBuilderExplicitSetting() throws Exception {
+ // Client has special settings.
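+ // Writers created from this client are expected to inherit those settings unless they
+ // are explicitly overridden on the StreamWriter.Builder, for example (illustrative only):
+ //   StreamWriter.newBuilder(stream, client).setEndpoint("yyy:345").build();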
+ BigQueryWriteSettings clientSettings = + BigQueryWriteSettings.newBuilder() + .setEndpoint("xxx:345") + .setBackgroundExecutorProvider( + InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(4).build()) + .setTransportChannelProvider(serviceHelper.createChannelProvider()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + BigQueryWriteClient client = BigQueryWriteClient.create(clientSettings); + StreamWriter.Builder writerWithClient = StreamWriter.newBuilder(TEST_STREAM_1, client); + BigQueryWriteSettings writerSettings = StreamWriter.getBigQueryWriteSettings(writerWithClient); + assertEquals("xxx:345", writerSettings.getEndpoint()); + assertTrue( + writerSettings.getBackgroundExecutorProvider() instanceof InstantiatingExecutorProvider); + assertEquals( + 4, + ((InstantiatingExecutorProvider) writerSettings.getBackgroundExecutorProvider()) + .getExecutorThreadCount()); + + // Explicit setting on StreamWriter is respected. + StreamWriter.Builder writerWithClientWithOverrides = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setEndpoint("yyy:345") + .setExecutorProvider( + InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(14).build()) + .setChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTimeoutDuration(java.time.Duration.ofSeconds(500)) + .build()) + .setCredentialsProvider( + BigQueryWriteSettings.defaultCredentialsProviderBuilder() + .setScopesToApply(Arrays.asList("A", "B")) + .build()); + BigQueryWriteSettings writerSettings2 = + StreamWriter.getBigQueryWriteSettings(writerWithClientWithOverrides); + assertEquals("yyy:345", writerSettings2.getEndpoint()); + assertTrue( + writerSettings2.getBackgroundExecutorProvider() instanceof InstantiatingExecutorProvider); + assertEquals( + 14, + ((InstantiatingExecutorProvider) writerSettings2.getBackgroundExecutorProvider()) + .getExecutorThreadCount()); + assertTrue( + writerSettings2.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider); + assertEquals( + java.time.Duration.ofSeconds(500), + ((InstantiatingGrpcChannelProvider) writerSettings2.getTransportChannelProvider()) + .getKeepAliveTimeoutDuration()); + assertTrue(writerSettings2.getCredentialsProvider() instanceof GoogleCredentialsProvider); + assertEquals( + 2, + ((GoogleCredentialsProvider) writerSettings2.getCredentialsProvider()) + .getScopesToApply() + .size()); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + @Test + void testAppendSuccessAndInternalErrorRetrySuccess() throws Exception { + StreamWriter writer = getTestStreamWriterRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"})); + ApiFuture appendFuture3 = + writer.append(createProtoRows(new String[] {"C"})); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(0, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals(0, appendFuture3.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendSuccessAndInternalQuotaErrorRetrySuccess() throws Exception { + 
StreamWriter writer = getTestStreamWriterRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"})); + ApiFuture appendFuture3 = + writer.append(createProtoRows(new String[] {"C"})); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(0, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals(0, appendFuture3.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + /* temporarily disable test as static variable is interfering with other tests + @Test + void testInternalQuotaError_MaxWaitTimeExceed_RetrySuccess() throws Exception { + // In order for the test to succeed, the given request must complete successfully even after all + // the retries. The fake server is configured to fail 3 times with a quota error. This means the + // client will perform retry with exponential backoff. The fake server injects 1 second of delay + // for each response. In addition, the exponential backoff injects a couple of seconds of delay. + // This yields an overall delay of about 5 seconds before the request succeeds. If the request + // send timestamp was being set only once, this would eventually exceed the 4 second timeout + // limit, and throw an exception. With the current behavior, the request send timestamp is reset + // each time a retry is performed, so we never exceed the 4 second timeout limit. 
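+ // Note: setMaxRequestCallbackWaitTime below is a static, process-wide setter, which is why
+ // this test can interfere with other tests and remains commented out.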
+ StreamWriter.setMaxRequestCallbackWaitTime(java.time.Duration.ofSeconds(4)); + testBigQueryWrite.setResponseSleep(Duration.ofSeconds(1)); + StreamWriter writer = getTestStreamWriterRetryEnabled(); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(0)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + writer.close(); + } + */ + + @Test + void testAppendSuccessAndInternalErrorRetrySuccessExclusive() throws Exception { + // Ensure we return an error from the fake server when a retry is in progress + testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); + // Ensure messages will be in the inflight queue + testBigQueryWrite.setVerifyOffset(true); + StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse( + new DummyResponseSupplierWillFailThenSucceed( + new FakeBigQueryWriteImpl.Response(createAppendResponse(1)), + /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS, + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build())); + testBigQueryWrite.addResponse(createAppendResponse(2)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"}), 0); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"}), 1); + ApiFuture appendFuture3 = + writer.append(createProtoRows(new String[] {"C"}), 2); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + assertEquals(2, appendFuture3.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendSuccessAndInternalErrorRetryNoOffsetSuccessExclusive() throws Exception { + StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"})); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendSuccessAndQuotaErrorRetryNoOffsetSuccessExclusive() throws Exception { + StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"})); + + assertEquals(0, 
appendFuture1.get().getAppendResult().getOffset().getValue());
+ assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue());
+
+ writer.close();
+ }
+
+ @Test
+ void testExclusiveAppendSuccessAndInternalErrorRetrySuccess() throws Exception {
+ // Ensure we return an error from the fake server when a retry is in progress
+ testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true);
+ // Ensure messages will be in the inflight queue
+ testBigQueryWrite.setVerifyOffset(true);
+ // fakeBigQueryWrite.setResponseSleep(Duration.ofSeconds(3));
+ StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled();
+ long appendCount = 20;
+ for (long i = 0; i < appendCount; i++) {
+ // Add a retriable error every 3 messages
+ if (i % 3 == 0) {
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(i)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS,
+ com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()));
+ } else {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+ }
+
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+ for (long i = 0; i < appendCount; i++) {
+ futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+ }
+
+ for (int i = 0; i < appendCount; i++) {
+ assertThat(futures.get(i).get().getAppendResult().getOffset().getValue()).isEqualTo((long) i);
+ }
+ }
+
+ @Test
+ void testExclusiveAppendSuccessAndQuotaErrorRetrySuccess() throws Exception {
+ // Ensure we return an error from the fake server when a retry is in progress
+ testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true);
+ // Ensure messages will be in the inflight queue
+ testBigQueryWrite.setVerifyOffset(true);
+ // fakeBigQueryWrite.setResponseSleep(Duration.ofSeconds(3));
+ StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled();
+ long appendCount = 20;
+ for (long i = 0; i < appendCount; i++) {
+ // Add a retriable error every 3 messages
+ if (i % 3 == 0) {
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(i)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS,
+ com.google.rpc.Status.newBuilder()
+ .setCode(Code.RESOURCE_EXHAUSTED.ordinal())
+ .build()));
+ } else {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+ }
+
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+ for (long i = 0; i < appendCount; i++) {
+ futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+ }
+
+ for (int i = 0; i < appendCount; i++) {
+ assertThat(futures.get(i).get().getAppendResult().getOffset().getValue()).isEqualTo((long) i);
+ }
+ }
+
+ @Test
+ void testAppendSuccessAndQuotaErrorRetrySuccessExclusive() throws Exception {
+ StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled();
+ testBigQueryWrite.addResponse(createAppendResponse(0));
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(1)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS,
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()));
+
+ ApiFuture<AppendRowsResponse> appendFuture1 =
+ writer.append(createProtoRows(new String[] {"A"}), 0);
+ ApiFuture<AppendRowsResponse> appendFuture2 =
+ writer.append(createProtoRows(new String[] {"B"}), 1);
+
+ assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue());
+ assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue());
+
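+ // Both offsets were acknowledged in order despite the injected quota error, so the
+ // writer can shut down cleanly.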
+ writer.close(); + } + + @Test + void testAppendSuccessWithArrowSerializedData() throws Exception { + StreamWriter writer = getTestStreamWriterExclusiveRetryEnabledWithArrowSchema(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = + writer.append(createArrowRecordBatch(new String[] {"A"}), 0L); + ApiFuture appendFuture2 = + writer.append(createArrowRecordBatch(new String[] {"B"}), 1L); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendSuccessWithUnserializedArrowRecordBatch() throws Exception { + StreamWriter writer = getTestStreamWriterExclusiveRetryEnabledWithUnserialiedArrowSchema(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1; + ApiFuture appendFuture2; + + try (VectorSchemaRoot vectorSchemaRoot1 = VectorSchemaRoot.create(ARROW_SCHEMA, allocator)) { + VarCharVector fooVector1 = (VarCharVector) vectorSchemaRoot1.getVector("foo"); + fooVector1.allocateNew(1); + fooVector1.set(0, "A".getBytes(UTF_8)); + vectorSchemaRoot1.setRowCount(1); + VectorUnloader vectorUnloader1 = new VectorUnloader(vectorSchemaRoot1); + try (ArrowRecordBatch recordBatch1 = vectorUnloader1.getRecordBatch()) { + appendFuture1 = writer.append(recordBatch1, 0L); + } + } + + try (VectorSchemaRoot vectorSchemaRoot2 = VectorSchemaRoot.create(ARROW_SCHEMA, allocator)) { + VarCharVector fooVector2 = (VarCharVector) vectorSchemaRoot2.getVector("foo"); + fooVector2.allocateNew(1); + fooVector2.set(0, "B".getBytes(UTF_8)); + vectorSchemaRoot2.setRowCount(1); + VectorUnloader vectorUnloader2 = new VectorUnloader(vectorSchemaRoot2); + try (ArrowRecordBatch recordBatch2 = vectorUnloader2.getRecordBatch()) { + appendFuture2 = writer.append(recordBatch2, 1L); + } + } + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); + + writer.close(); + } + + @Test + void testAppendSuccessAndInternalErrorMaxRetryNumAttempts() throws Exception { + StreamWriter writer = getTestStreamWriterRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"})); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> { + appendFuture2.get(); + }); + assertEquals( + Status.Code.INTERNAL, ((StatusRuntimeException) ex.getCause()).getStatus().getCode()); + } + + @Test + void testAppendSuccessAndQuotaErrorMaxRetryNumAttempts() throws Exception { + StreamWriter writer = 
getTestStreamWriterRetryEnabled();
+ testBigQueryWrite.addResponse(createAppendResponse(0));
+ testBigQueryWrite.addStatusException(
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build());
+ testBigQueryWrite.addStatusException(
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build());
+ testBigQueryWrite.addStatusException(
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build());
+ testBigQueryWrite.addStatusException(
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build());
+ testBigQueryWrite.addResponse(createAppendResponse(1));
+
+ ApiFuture<AppendRowsResponse> appendFuture1 =
+ writer.append(createProtoRows(new String[] {"A"}));
+ ApiFuture<AppendRowsResponse> appendFuture2 =
+ writer.append(createProtoRows(new String[] {"B"}));
+
+ assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue());
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ appendFuture2.get();
+ });
+ assertEquals(
+ Status.Code.RESOURCE_EXHAUSTED,
+ ((StatusRuntimeException) ex.getCause()).getStatus().getCode());
+ }
+
+ @Test
+ void testExclusiveAppendSuccessAndInternalErrorRetryMaxRetry() throws Exception {
+ testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true);
+ // Ensure messages will be in the inflight queue
+ testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1));
+ StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled();
+
+ int appendCount = 10;
+ for (long i = 0; i < appendCount - 1; i++) {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(appendCount)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS + 1,
+ com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()));
+
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+ for (long i = 0; i < appendCount; i++) {
+ futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+ }
+
+ for (int i = 0; i < appendCount - 1; i++) {
+ assertThat(futures.get(i).get().getAppendResult().getOffset().getValue()).isEqualTo((long) i);
+ }
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ futures.get(appendCount - 1).get();
+ });
+ assertEquals(
+ Status.Code.INTERNAL, ((StatusRuntimeException) ex.getCause()).getStatus().getCode());
+ }
+
+ @Test
+ void testExclusiveAppendSuccessAndQuotaErrorRetryMaxRetry() throws Exception {
+ testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true);
+ // Ensure messages will be in the inflight queue
+ testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1));
+ StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled();
+
+ int appendCount = 10;
+ for (long i = 0; i < appendCount - 1; i++) {
+ testBigQueryWrite.addResponse(createAppendResponse(i));
+ }
+
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(appendCount)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS + 1,
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()));
+
+ List<ApiFuture<AppendRowsResponse>> futures = new ArrayList<>();
+ for (long i = 0; i < appendCount; i++) {
+ futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i));
+ }
+
+ for (int i = 0; i < appendCount - 1; i++) {
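+ // Every append before the final, repeatedly failing one must have completed in order.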
+ assertThat(futures.get(i).get().getAppendResult().getOffset().getValue()).isEqualTo((long) i);
+ }
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ futures.get(appendCount - 1).get();
+ });
+ assertEquals(
+ Status.Code.RESOURCE_EXHAUSTED,
+ ((StatusRuntimeException) ex.getCause()).getStatus().getCode());
+ }
+
+ @Test
+ void testExclusiveAppendQuotaErrorRetryExponentialBackoff() throws Exception {
+ testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true);
+ StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled();
+
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(0)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS + 1,
+ com.google.rpc.Status.newBuilder().setCode(Code.RESOURCE_EXHAUSTED.ordinal()).build()));
+
+ ApiFuture<AppendRowsResponse> future =
+ writer.append(createProtoRows(new String[] {String.valueOf(0)}), 0);
+
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ future.get();
+ });
+ assertEquals(
+ Status.Code.RESOURCE_EXHAUSTED,
+ ((StatusRuntimeException) ex.getCause()).getStatus().getCode());
+
+ ArrayList<Instant> instants = testBigQueryWrite.getLatestRequestReceivedInstants();
+ Instant previousInstant = instants.get(0);
+ // Include initial attempt
+ assertEquals(instants.size(), MAX_RETRY_NUM_ATTEMPTS + 1);
+ double minExpectedDelay = INITIAL_RETRY_MILLIS * 0.95;
+ for (int i = 1; i < instants.size(); i++) {
+ Instant currentInstant = instants.get(i);
+ double differenceInMillis =
+ java.time.Duration.between(previousInstant, currentInstant).toMillis();
+ assertThat(differenceInMillis).isAtLeast((double) INITIAL_RETRY_MILLIS);
+ assertThat(differenceInMillis).isGreaterThan(minExpectedDelay);
+ minExpectedDelay = minExpectedDelay * RETRY_MULTIPLIER;
+ previousInstant = currentInstant;
+ }
+ }
+
+ @Test
+ void testAppendInternalErrorRetryExponentialBackoff() throws Exception {
+ StreamWriter writer = getTestStreamWriterRetryEnabled();
+
+ testBigQueryWrite.addResponse(
+ new DummyResponseSupplierWillFailThenSucceed(
+ new FakeBigQueryWriteImpl.Response(createAppendResponse(0)),
+ /* totalFailCount= */ MAX_RETRY_NUM_ATTEMPTS + 1,
+ com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL.ordinal()).build()));
+
+ ApiFuture<AppendRowsResponse> future =
+ writer.append(createProtoRows(new String[] {String.valueOf(0)}), 0);
+
+ ExecutionException ex =
+ assertThrows(
+ ExecutionException.class,
+ () -> {
+ future.get();
+ });
+ assertEquals(
+ Status.Code.INTERNAL, ((StatusRuntimeException) ex.getCause()).getStatus().getCode());
+
+ ArrayList<Instant> instants = testBigQueryWrite.getLatestRequestReceivedInstants();
+ Instant previousInstant = instants.get(0);
+ // Include initial attempt
+ assertEquals(instants.size(), MAX_RETRY_NUM_ATTEMPTS + 1);
+ double minExpectedDelay = INITIAL_RETRY_MILLIS * 0.95;
+ for (int i = 1; i < instants.size(); i++) {
+ Instant currentInstant = instants.get(i);
+ double differenceInMillis =
+ java.time.Duration.between(previousInstant, currentInstant).toMillis();
+ assertThat(differenceInMillis).isAtLeast((double) INITIAL_RETRY_MILLIS);
+ assertThat(differenceInMillis).isGreaterThan(minExpectedDelay);
+ minExpectedDelay = minExpectedDelay * RETRY_MULTIPLIER;
+ previousInstant = currentInstant;
+ }
+ }
+
+ @Test
+ void testAppendSuccessAndNonRetryableError() throws Exception {
+ StreamWriter writer = getTestStreamWriterRetryEnabled();
+ testBigQueryWrite.addResponse(createAppendResponse(0));
+ testBigQueryWrite.addStatusException(
+ 
com.google.rpc.Status.newBuilder().setCode(Code.INVALID_ARGUMENT.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"})); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"})); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> { + appendFuture2.get(); + }); + assertEquals( + Status.Code.INVALID_ARGUMENT, + ((StatusRuntimeException) ex.getCause()).getStatus().getCode()); + } + + @Test + void testExclusiveAppendSuccessAndNonRetryableError() throws Exception { + StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); + testBigQueryWrite.addResponse(createAppendResponse(0)); + testBigQueryWrite.addStatusException( + com.google.rpc.Status.newBuilder().setCode(Code.INVALID_ARGUMENT.ordinal()).build()); + testBigQueryWrite.addResponse(createAppendResponse(1)); + + ApiFuture appendFuture1 = + writer.append(createProtoRows(new String[] {"A"}), 0); + ApiFuture appendFuture2 = + writer.append(createProtoRows(new String[] {"B"}), 1); + + assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); + ExecutionException ex = + assertThrows( + ExecutionException.class, + () -> { + appendFuture2.get(); + }); + assertEquals( + Status.Code.INVALID_ARGUMENT, + ((StatusRuntimeException) ex.getCause()).getStatus().getCode()); + } + + @Test + void testGetDefaultStreamName() { + TableName tableName = TableName.of("projectId", "datasetId", "tableId"); + + String actualDefaultName = StreamWriter.getDefaultStreamName(tableName); + + assertEquals( + "projects/projectId/datasets/datasetId/tables/tableId/_default", actualDefaultName); + } + + @Test + void testLocationCacheIsHit() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .setLocation("oklahoma") + .build(); + testBigQueryWrite.addResponse(expectedResponse); + + // first stream will result in call to getWriteStream for location lookup + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .build(); + + // second stream will hit the location cache + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .build(); + assertEquals(1, testBigQueryWrite.getWriteStreamRequests().size()); + } + + @Test + void testLocationCacheExpires() throws Exception { + // force cache to expire in 1000 millis + StreamWriter.recreateProjectLocationCache(1000); + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .setLocation("oklahoma") + .build(); + testBigQueryWrite.addResponse(expectedResponse); + testBigQueryWrite.addResponse(expectedResponse); + + // first stream will result in call to getWriteStream for location lookup + StreamWriter writer1 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + 
.setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .build(); + + // force cache to expire + TimeUnit.SECONDS.sleep(2); + + // second stream will result in call to getWriteStream for location lookup + StreamWriter writer2 = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setEnableConnectionPool(true) + .build(); + assertEquals(2, testBigQueryWrite.getWriteStreamRequests().size()); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java new file mode 100644 index 000000000000..4be67ba01eb2 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java @@ -0,0 +1,186 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.it; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.api.core.ApiFuture; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field.Mode; +import com.google.cloud.bigquery.FieldValueList; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult; +import com.google.cloud.bigquery.storage.v1.BigDecimalByteStringEncoder; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableFieldSchema; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.TableSchema; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.protobuf.Descriptors; +import java.io.IOException; +import java.math.BigDecimal; +import java.util.Iterator; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +class ITBigQueryBigDecimalByteStringEncoderTest { + private static final Logger LOG = + Logger.getLogger(ITBigQueryBigDecimalByteStringEncoderTest.class.getName()); + private static final String DATASET = 
RemoteBigQueryHelper.generateDatasetName(); + private static final String TABLE = "testtable"; + private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset"; + + private static BigQueryWriteClient client; + private static TableInfo tableInfo; + private static BigQuery bigquery; + + @BeforeAll + static void beforeAll() throws IOException { + client = BigQueryWriteClient.create(); + + RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); + bigquery = bigqueryHelper.getOptions().getService(); + DatasetInfo datasetInfo = + DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build(); + bigquery.create(datasetInfo); + tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, TABLE), + StandardTableDefinition.of( + Schema.of( + com.google.cloud.bigquery.Field.newBuilder( + "test_numeric_zero", StandardSQLTypeName.NUMERIC) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test_numeric_one", StandardSQLTypeName.NUMERIC) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test_numeric_repeated", StandardSQLTypeName.NUMERIC) + .setMode(Mode.REPEATED) + .build()))) + .build(); + bigquery.create(tableInfo); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (client != null) { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + } + } + + @Test + void TestBigDecimalEncoding() + throws IOException, + InterruptedException, + ExecutionException, + Descriptors.DescriptorValidationException { + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, TABLE); + TableFieldSchema TEST_NUMERIC_ZERO = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_zero") + .build(); + TableFieldSchema TEST_NUMERIC_ONE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_numeric_one") + .build(); + TableFieldSchema TEST_NUMERIC_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.NUMERIC) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_numeric_repeated") + .build(); + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, TEST_NUMERIC_ZERO) + .addFields(1, TEST_NUMERIC_ONE) + .addFields(2, TEST_NUMERIC_REPEATED) + .build(); + try (JsonStreamWriter jsonStreamWriter = + JsonStreamWriter.newBuilder(parent.toString(), tableSchema).build()) { + JSONObject row = new JSONObject(); + row.put( + "test_numeric_zero", + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0"))); + row.put( + "test_numeric_one", + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.2"))); + row.put( + "test_numeric_repeated", + new JSONArray( + new byte[][] { + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.2")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("-1.2")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("99999999999999999999999999999.999999999")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("-99999999999999999999999999999.999999999")) + .toByteArray(), + })); + JSONArray jsonArr = new JSONArray(new 
JSONObject[] {row});
+ ApiFuture<AppendRowsResponse> response = jsonStreamWriter.append(jsonArr, -1);
+ AppendRowsResponse arr = response.get();
+ AppendResult ar = arr.getAppendResult();
+ boolean ho = ar.hasOffset();
+ TableResult result =
+ bigquery.listTableData(
+ tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+ Iterator<FieldValueList> iter = result.getValues().iterator();
+ FieldValueList currentRow;
+ currentRow = iter.next();
+ assertEquals("0", currentRow.get(0).getStringValue());
+ assertEquals("1.2", currentRow.get(1).getStringValue());
+ assertEquals("0", currentRow.get(2).getRepeatedValue().get(0).getStringValue());
+ assertEquals("1.2", currentRow.get(2).getRepeatedValue().get(1).getStringValue());
+ assertEquals("-1.2", currentRow.get(2).getRepeatedValue().get(2).getStringValue());
+ assertEquals(
+ "99999999999999999999999999999.999999999",
+ currentRow.get(2).getRepeatedValue().get(3).getStringValue());
+ assertEquals(
+ "-99999999999999999999999999999.999999999",
+ currentRow.get(2).getRepeatedValue().get(4).getStringValue());
+ }
+ }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java new file mode 100644 index 000000000000..440641ee404a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java @@ -0,0 +1,147 @@ +/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1.it;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.ServiceOptions;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.cloud.bigquery.storage.v1.ReadStream;
+import com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Integration tests for BigQuery Storage API which target long running sessions.
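+ * The long running test fans five read streams of the public samples.wikipedia table out to
+ * one worker thread each, keeping the session open long enough to exercise timeout settings.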
+ * These tests can be enabled by setting the system property
+ * 'bigquery.storage.enable_long_running_tests' to true.
+ */
+class ITBigQueryStorageLongRunningTest {
+
+ private static final Logger LOG =
+ Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName());
+
+ private static final String LONG_TESTS_ENABLED_PROPERTY =
+ "bigquery.storage.enable_long_running_tests";
+
+ private static final String LONG_TESTS_DISABLED_MESSAGE =
+ String.format(
+ "BigQuery Storage long running tests are not enabled and will be skipped. "
+ + "To enable them, set system property '%s' to true.",
+ LONG_TESTS_ENABLED_PROPERTY);
+
+ private static BigQueryReadClient client;
+ private static String parentProjectId;
+
+ @BeforeAll
+ static void beforeAll() throws IOException {
+ Assumptions.assumeTrue(
+ Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE);
+ client = BigQueryReadClient.create();
+ parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId());
+
+ LOG.info(
+ String.format(
+ "%s tests running with parent project: %s",
+ ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId));
+ }
+
+ @AfterAll
+ static void afterAll() throws InterruptedException {
+ if (client != null) {
+ client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
+ }
+ }
+
+ @Test
+ void testLongRunningReadSession() throws InterruptedException, ExecutionException {
+ // This test reads a larger table with the goal of doing a simple validation of timeout settings
+ // for a longer running session.
+
+ String table =
+ BigQueryResource.formatTableResource(
+ /* projectId= */ "bigquery-public-data",
+ /* datasetId= */ "samples",
+ /* tableId= */ "wikipedia");
+
+ ReadSession session =
+ client.createReadSession(
+ /* parent= */ parentProjectId,
+ /* readSession= */ ReadSession.newBuilder()
+ .setTable(table)
+ .setDataFormat(DataFormat.AVRO)
+ .build(),
+ /* maxStreamCount= */ 5);
+
+ assertEquals(
+ 5,
+ session.getStreamsCount(),
+ String.format(
+ "Did not receive expected number of streams for table '%s' CreateReadSession"
+ + " response:%n%s",
+ table, session.toString()));
+
+ List<Callable<Long>> tasks = new ArrayList<>(session.getStreamsCount());
+ for (final ReadStream stream : session.getStreamsList()) {
+ tasks.add(() -> readAllRowsFromStream(stream));
+ }
+
+ ExecutorService executor = Executors.newFixedThreadPool(tasks.size());
+ List<Future<Long>> results = executor.invokeAll(tasks);
+ executor.shutdown();
+
+ long rowCount = 0;
+ for (Future<Long> result : results) {
+ rowCount += result.get();
+ }
+
+ assertEquals(313_797_035, rowCount);
+ }
+
+ private long readAllRowsFromStream(ReadStream readStream) {
+
+ ReadRowsRequest readRowsRequest =
+ ReadRowsRequest.newBuilder().setReadStream(readStream.getName()).build();
+
+ long rowCount = 0;
+ ServerStream<ReadRowsResponse> serverStream = client.readRowsCallable().call(readRowsRequest);
+ for (ReadRowsResponse response : serverStream) {
+ rowCount += response.getRowCount();
+ }
+
+ LOG.info(
+ String.format("Read total of %d rows from stream '%s'.", rowCount, readStream.getName()));
+ return rowCount;
+ }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageReadClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageReadClientTest.java new file mode 100644 index 000000000000..17923364a1d3 --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageReadClientTest.java @@ -0,0 +1,1926 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.it; + +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.UnauthenticatedException; +import com.google.cloud.RetryOption; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Field.Mode; +import com.google.cloud.bigquery.FieldElementType; +import com.google.cloud.bigquery.Job; +import com.google.cloud.bigquery.JobInfo; +import com.google.cloud.bigquery.JobInfo.WriteDisposition; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.Range; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.TimePartitioning; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions; +import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.DataFormat; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions; +import com.google.cloud.bigquery.storage.v1.ReadStream; +import 
com.google.cloud.bigquery.storage.v1.TableFieldSchema; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.TableSchema; +import com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource; +import com.google.cloud.bigquery.storage.v1.it.util.Helper; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderArrow; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderArrow.ArrowRangeBatchConsumer; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderAvro; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderAvro.AvroRowConsumer; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.Int64Value; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.io.IOException; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.apache.avro.Conversions; +import org.apache.avro.LogicalTypes; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.util.Utf8; +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +/** Integration tests for BigQuery Storage API. 
*/ +class ITBigQueryStorageReadClientTest { + private static final Logger LOG = + Logger.getLogger(ITBigQueryStorageReadClientTest.class.getName()); + private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); + private static final String DESCRIPTION = "BigQuery Storage Java client test dataset"; + private static final String BQSTORAGE_TIMESTAMP_READ_TABLE = "bqstorage_timestamp_read"; + private static final int SHAKESPEARE_SAMPLE_ROW_COUNT = 164_656; + private static final int SHAKESPEARE_SAMPELS_ROWS_MORE_THAN_100_WORDS = 1_333; + private static final int MAX_STREAM_COUNT = 1; + + private static BigQueryReadClient readClient; + private static String projectName; + private static String parentProjectId; + private static BigQuery bigquery; + + private static final String FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + "0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + "0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"project_id\": \"someprojectid\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\",\n" + + " \"universe_domain\": \"googleapis.com\"\n" + + "}"; + + private static final String FAKE_JSON_CRED_WITH_INVALID_DOMAIN = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + 
"0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + "0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"project_id\": \"someprojectid\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\",\n" + + " \"universe_domain\": \"fake.domain\"\n" + + "}"; + + private static final com.google.cloud.bigquery.Schema RANGE_SCHEMA = + com.google.cloud.bigquery.Schema.of( + Field.newBuilder("name", StandardSQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE) + .setDescription("Name of the row") + .build(), + Field.newBuilder("date", StandardSQLTypeName.RANGE) + .setMode(Field.Mode.NULLABLE) + .setDescription("Range field with DATE") + .setRangeElementType(FieldElementType.newBuilder().setType("DATE").build()) + .build(), + Field.newBuilder("datetime", StandardSQLTypeName.RANGE) + .setMode(Field.Mode.NULLABLE) + .setDescription("Range field with DATETIME") + .setRangeElementType(FieldElementType.newBuilder().setType("DATETIME").build()) + .build(), + Field.newBuilder("timestamp", StandardSQLTypeName.RANGE) + .setMode(Field.Mode.NULLABLE) + .setDescription("Range field with TIMESTAMP") + .setRangeElementType(FieldElementType.newBuilder().setType("TIMESTAMP").build()) + .build()); + + // storage.v1.TableSchema of RANGE_SCHEMA + private static final TableSchema RANGE_TABLE_SCHEMA = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder() + .setName("name") + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("date") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .build()) + 
.setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("datetime") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName("timestamp") + .setType(TableFieldSchema.Type.RANGE) + .setRangeElementType( + TableFieldSchema.FieldElementType.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .build(); + + private static final Map RANGE_TEST_VALUES_DATES = + new ImmutableMap.Builder() + .put( + "bounded", + Range.newBuilder() + .setStart("2020-01-01") + .setEnd("2020-12-31") + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .put( + "unboundedStart", + Range.newBuilder() + .setStart(null) + .setEnd("2020-12-31") + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .put( + "unboundedEnd", + Range.newBuilder() + .setStart("2020-01-01") + .setEnd(null) + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .put( + "unbounded", + Range.newBuilder() + .setStart(null) + .setEnd(null) + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .build(); + + // dates are returned as days since epoch + private static final Map RANGE_TEST_VALUES_EXPECTED_DATES = + new ImmutableMap.Builder() + .put( + "bounded", + Range.newBuilder() + .setStart("18262") + .setEnd("18627") + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .put( + "unboundedStart", + Range.newBuilder() + .setStart(null) + .setEnd("18627") + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .put( + "unboundedEnd", + Range.newBuilder() + .setStart("18262") + .setEnd(null) + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .put( + "unbounded", + Range.newBuilder() + .setStart(null) + .setEnd(null) + .setType(FieldElementType.newBuilder().setType("DATE").build()) + .build()) + .build(); + + private static final ImmutableMap RANGE_TEST_VALUES_DATETIME = + new ImmutableMap.Builder() + .put( + "bounded", + Range.newBuilder() + .setStart("2014-08-19T05:41:35.220000") + .setEnd("2015-09-20T06:41:35.220000") + .setType(FieldElementType.newBuilder().setType("DATETIME").build()) + .build()) + .put( + "unboundedStart", + Range.newBuilder() + .setStart(null) + .setEnd("2015-09-20T06:41:35.220000") + .setType(FieldElementType.newBuilder().setType("DATETIME").build()) + .build()) + .put( + "unboundedEnd", + Range.newBuilder() + .setStart("2014-08-19T05:41:35.220000") + .setEnd(null) + .setType(FieldElementType.newBuilder().setType("DATETIME").build()) + .build()) + .put( + "unbounded", + Range.newBuilder() + .setStart(null) + .setEnd(null) + .setType(FieldElementType.newBuilder().setType("DATETIME").build()) + .build()) + .build(); + + // datetime are returned as up to millisecond precision instead of microsecond input value + private static final ImmutableMap RANGE_TEST_VALUES_EXPECTED_DATETIME = + new ImmutableMap.Builder() + .put( + "bounded", + Range.newBuilder() + .setStart("2014-08-19T05:41:35.220") + .setEnd("2015-09-20T06:41:35.220") + .setType(FieldElementType.newBuilder().setType("DATETIME").build()) + .build()) + .put( + "unboundedStart", + Range.newBuilder() + .setStart(null) + .setEnd("2015-09-20T06:41:35.220") + 
.setType(FieldElementType.newBuilder().setType("DATETIME").build())
+                  .build())
+          .put(
+              "unboundedEnd",
+              Range.newBuilder()
+                  .setStart("2014-08-19T05:41:35.220")
+                  .setEnd(null)
+                  .setType(FieldElementType.newBuilder().setType("DATETIME").build())
+                  .build())
+          .put(
+              "unbounded",
+              Range.newBuilder()
+                  .setStart(null)
+                  .setEnd(null)
+                  .setType(FieldElementType.newBuilder().setType("DATETIME").build())
+                  .build())
+          .build();
+
+  private static final ImmutableMap<String, Range> RANGE_TEST_VALUES_TIMESTAMP =
+      new ImmutableMap.Builder<String, Range>()
+          .put(
+              "bounded",
+              Range.newBuilder()
+                  .setStart("2014-08-19 12:41:35.220000+00:00")
+                  .setEnd("2015-09-20 13:41:35.220000+01:00")
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .put(
+              "unboundedStart",
+              Range.newBuilder()
+                  .setStart(null)
+                  .setEnd("2015-09-20 13:41:35.220000+01:00")
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .put(
+              "unboundedEnd",
+              Range.newBuilder()
+                  .setStart("2014-08-19 12:41:35.220000+00:00")
+                  .setEnd(null)
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .put(
+              "unbounded",
+              Range.newBuilder()
+                  .setStart(null)
+                  .setEnd(null)
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .build();
+
+  // Timestamps are returned as microseconds since the Unix epoch.
+  private static final ImmutableMap<String, Range> RANGE_TEST_VALUES_EXPECTED_TIMESTAMP =
+      new ImmutableMap.Builder<String, Range>()
+          .put(
+              "bounded",
+              Range.newBuilder()
+                  .setStart("1408452095220000")
+                  .setEnd("1442752895220000")
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .put(
+              "unboundedStart",
+              Range.newBuilder()
+                  .setStart(null)
+                  .setEnd("1442752895220000")
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .put(
+              "unboundedEnd",
+              Range.newBuilder()
+                  .setStart("1408452095220000")
+                  .setEnd(null)
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .put(
+              "unbounded",
+              Range.newBuilder()
+                  .setStart(null)
+                  .setEnd(null)
+                  .setType(FieldElementType.newBuilder().setType("TIMESTAMP").build())
+                  .build())
+          .build();
+
+  private static final Map<String, Map<AttributeKey<?>, Object>> OTEL_ATTRIBUTES =
+      new HashMap<String, Map<AttributeKey<?>, Object>>();
+  private static final Map<String, String> OTEL_PARENT_SPAN_IDS = new HashMap<>();
+  private static final Map<String, String> OTEL_SPAN_IDS_TO_NAMES = new HashMap<>();
+
+  private static class TestSpanExporter implements io.opentelemetry.sdk.trace.export.SpanExporter {
+    @Override
+    public CompletableResultCode export(Collection<SpanData> collection) {
+      if (collection.isEmpty()) {
+        return CompletableResultCode.ofFailure();
+      }
+      for (SpanData data : collection) {
+        OTEL_ATTRIBUTES.put(data.getName(), data.getAttributes().asMap());
+        OTEL_PARENT_SPAN_IDS.put(data.getName(), data.getParentSpanId());
+        OTEL_SPAN_IDS_TO_NAMES.put(data.getSpanId(), data.getName());
+      }
+      return CompletableResultCode.ofSuccess();
+    }
+
+    @Override
+    public CompletableResultCode flush() {
+      return CompletableResultCode.ofSuccess();
+    }
+
+    @Override
+    public CompletableResultCode shutdown() {
+      return CompletableResultCode.ofSuccess();
+    }
+  }
+
+  @BeforeAll
+  static void beforeAll() throws IOException, DescriptorValidationException, InterruptedException {
+    readClient = BigQueryReadClient.create();
+    projectName = ServiceOptions.getDefaultProjectId();
+    parentProjectId = String.format("projects/%s", projectName);
+
+    LOG.info(
+        String.format(
+            "%s tests running with parent project: %s",
+            ITBigQueryStorageReadClientTest.class.getSimpleName(), parentProjectId));
+
+    RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
+    bigquery = bigqueryHelper.getOptions().getService();
+    DatasetInfo datasetInfo =
+        DatasetInfo.newBuilder(/* datasetId= */ DATASET)
+            .setDescription(DESCRIPTION)
+            .build();
+    bigquery.create(datasetInfo);
+    LOG.info("Created test dataset: " + DATASET);
+
+    setupTimestampTable();
+  }
+
+  private static void setupTimestampTable()
+      throws DescriptorValidationException, IOException, InterruptedException {
+    // Schema to create a BQ table
+    com.google.cloud.bigquery.Schema timestampSchema =
+        com.google.cloud.bigquery.Schema.of(
+            Field.newBuilder(TIMESTAMP_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP)
+                .setMode(Mode.NULLABLE)
+                .build(),
+            Field.newBuilder(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP)
+                .setTimestampPrecision(Helper.PICOSECOND_PRECISION)
+                .setMode(Mode.NULLABLE)
+                .build());
+
+    // Create BQ table with timestamps
+    TableId tableId = TableId.of(DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE);
+    bigquery.create(TableInfo.of(tableId, StandardTableDefinition.of(timestampSchema)));
+
+    TableName parentTable = TableName.of(projectName, DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE);
+
+    // Define the BQStorage schema to write to
+    TableSchema timestampTableSchema =
+        TableSchema.newBuilder()
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName(TIMESTAMP_COLUMN_NAME)
+                    .setType(TableFieldSchema.Type.TIMESTAMP)
+                    .setMode(TableFieldSchema.Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME)
+                    .setTimestampPrecision(
+                        Int64Value.newBuilder().setValue(Helper.PICOSECOND_PRECISION).build())
+                    .setType(TableFieldSchema.Type.TIMESTAMP)
+                    .setMode(TableFieldSchema.Mode.NULLABLE)
+                    .build())
+            .build();
+
+    try (JsonStreamWriter writer =
+        JsonStreamWriter.newBuilder(parentTable.toString(), timestampTableSchema).build()) {
+      JSONArray data = new JSONArray();
+      for (Object[] timestampData : Helper.INPUT_TIMESTAMPS) {
+        JSONObject row = new JSONObject();
+        row.put(TIMESTAMP_COLUMN_NAME, timestampData[0]);
+        row.put(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, timestampData[1]);
+        data.put(row);
+      }
+
+      ApiFuture<AppendRowsResponse> future = writer.append(data);
+      // The append method is asynchronous. Rather than waiting for the method to complete,
+      // which can hurt performance, register a completion callback and continue streaming.
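+      // (Editor's aside, not part of the original test.) Helper.AppendCompleteCallback lives in
+      // the shared test helper and is not shown in this diff; a minimal callback of the required
+      // shape might look like this hypothetical sketch:
+      class LoggingAppendCallback
+          implements com.google.api.core.ApiFutureCallback<AppendRowsResponse> {
+        @Override
+        public void onSuccess(AppendRowsResponse response) {
+          // AppendResult carries the committed offset for streams created with explicit offsets.
+          LOG.info("Append completed: " + response.getAppendResult());
+        }
+
+        @Override
+        public void onFailure(Throwable throwable) {
+          // Rethrow so an asynchronous append cannot fail silently.
+          throw new RuntimeException(throwable);
+        }
+      }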
+ ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (readClient != null) { + readClient.close(); + readClient.awaitTermination(10, TimeUnit.SECONDS); + } + + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + LOG.info("Deleted test dataset: " + DATASET); + } + } + + @Test + void testSimpleReadAvro() { + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + readClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + long rowCount = 0; + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); + } + + @Test + void testSimpleReadArrow() { + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + readClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.ARROW) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + // Assert that there are streams available in the session. An empty table may not have + // data available. If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results + // directly. + Preconditions.checkState(session.getStreamsCount() > 0); + + // Use the first stream to perform reading. + String streamName = session.getStreams(0).getName(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(streamName).build(); + + long rowCount = 0; + // Process each block of rows as they arrive and decode using our simple row reader. + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + rowCount += response.getRowCount(); + } + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); + } + + @Test + void testRangeTypeSimple() throws InterruptedException { + // Create table with Range values. 
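+    // (Editor's aside, not part of the original test.) For reference, RANGE bounds come back in
+    // the wire representations captured by the expected-value maps above: DATE bounds as days
+    // since the Unix epoch and TIMESTAMP bounds as microseconds since the epoch. Those constants
+    // follow from java.time, e.g.:
+    long exampleEpochDays = java.time.LocalDate.parse("2020-01-01").toEpochDay(); // 18262
+    long exampleEpochMicros =
+        java.time.Instant.parse("2014-08-19T12:41:35.220Z").toEpochMilli() * 1_000L;
+    // exampleEpochMicros == 1408452095220000L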
+ String tableName = "test_range_type_read" + UUID.randomUUID().toString().substring(0, 8); + TableId tableId = TableId.of(DATASET, tableName); + QueryJobConfiguration createTable = + QueryJobConfiguration.newBuilder( + String.format( + "CREATE TABLE %s AS SELECT RANGE(DATE '2020-01-01', DATE '2020-12-31') as date," + + " \n" + + "RANGE(DATETIME '2020-01-01T12:00:00', DATETIME '2020-12-31T12:00:00') as" + + " datetime, \n" + + "RANGE(TIMESTAMP '2014-01-01 07:00:00.000000+00:00', TIMESTAMP" + + " '2015-01-01 07:00:00.000000+00:00') as timestamp", + tableName)) + .setDefaultDataset(DatasetId.of(DATASET)) + .setUseLegacySql(false) + .build(); + bigquery.query(createTable); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ ServiceOptions.getDefaultProjectId(), + /* datasetId= */ DATASET, + /* tableId= */ tableId.getTable()); + + ReadSession session = + readClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.ARROW) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + // Assert that there are streams available in the session. An empty table may not have + // data available. If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results + // directly. + Preconditions.checkState(session.getStreamsCount() > 0); + + // Use the first stream to perform reading. + String streamName = session.getStreams(0).getName(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(streamName).build(); + + long rowCount = 0; + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + rowCount += response.getRowCount(); + } + assertEquals(1, rowCount); + } + + @Test + void testRangeTypeWrite() + throws InterruptedException, IOException, DescriptorValidationException { + // Create table with Range values. + String tableName = "test_range_type_write" + UUID.randomUUID().toString().substring(0, 8); + TableId tableId = TableId.of(DATASET, tableName); + bigquery.create(TableInfo.of(tableId, StandardTableDefinition.of(RANGE_SCHEMA))); + + TableName parentTable = TableName.of(projectName, DATASET, tableName); + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelayDuration(Duration.ofMinutes(1)) + .build(); + try (JsonStreamWriter writer = + JsonStreamWriter.newBuilder(parentTable.toString(), RANGE_TABLE_SCHEMA) + .setRetrySettings(retrySettings) + .build()) { + + // Write 4 rows of data to the table with and without unbounded values. 
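+      // (Editor's aside, not part of the original test.) One bounded row produced by the loop
+      // below has the JSON shape sketched here; the mixed-case column names used further down
+      // ("daTE", "daTEtiME", "tiMEstAMp") appear intended to exercise case-insensitive field
+      // matching in JsonStreamWriter:
+      JSONObject exampleRow =
+          new JSONObject()
+              .put("name", "bounded")
+              .put("daTE", new JSONObject().put("start", "2020-01-01").put("end", "2020-12-31"));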
+ JSONArray data = new JSONArray(); + for (String name : RANGE_TEST_VALUES_DATES.keySet()) { + JSONObject row = new JSONObject(); + row.put("name", name); + + JSONObject dateColumn = new JSONObject(); + Range date = RANGE_TEST_VALUES_DATES.get(name); + if ((!date.getStart().isNull()) && (date.getStart().getStringValue() != null)) { + dateColumn.put("start", date.getStart().getStringValue()); + } + if ((!date.getEnd().isNull()) && (date.getEnd().getStringValue() != null)) { + dateColumn.put("end", date.getEnd().getStringValue()); + } + row.put("daTE", dateColumn); + + JSONObject datetimeColumn = new JSONObject(); + Range datetime = RANGE_TEST_VALUES_DATETIME.get(name); + if ((!datetime.getStart().isNull()) && (datetime.getStart().getStringValue() != null)) { + datetimeColumn.put("start", datetime.getStart().getStringValue()); + } + if ((!datetime.getEnd().isNull()) && (datetime.getEnd().getStringValue() != null)) { + datetimeColumn.put("end", datetime.getEnd().getStringValue()); + } + row.put("daTEtiME", datetimeColumn); + + JSONObject timestampColumn = new JSONObject(); + Range timestamp = RANGE_TEST_VALUES_TIMESTAMP.get(name); + if ((!timestamp.getStart().isNull()) && (timestamp.getStart().getStringValue() != null)) { + timestampColumn.put("start", timestamp.getStart().getStringValue()); + } + if ((!timestamp.getEnd().isNull()) && (timestamp.getEnd().getStringValue() != null)) { + timestampColumn.put("end", timestamp.getEnd().getStringValue()); + } + row.put("tiMEstAMp", timestampColumn); + + data.put(row); + } + + ApiFuture future = writer.append(data); + // The append method is asynchronous. Rather than waiting for the method to complete, + // which can hurt performance, register a completion callback and continue streaming. + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ projectName, + /* datasetId= */ DATASET, + /* tableId= */ tableId.getTable()); + ReadSession session = + readClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.ARROW) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + // Assert that there are streams available in the session. An empty table may not have + // data available. If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results + // directly. + Preconditions.checkState(session.getStreamsCount() > 0); + + // Set up a simple reader and start a read session. + try (SimpleRowReaderArrow reader = new SimpleRowReaderArrow(session.getArrowSchema())) { + + // Assert that there are streams available in the session. An empty table may not have + // data available. If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results + // directly. + Preconditions.checkState(session.getStreamsCount() > 0); + + // Use the first stream to perform reading. 
+      String streamName = session.getStreams(0).getName();
+
+      ReadRowsRequest readRowsRequest =
+          ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+      long rowCount = 0;
+      // Process each block of rows as they arrive and decode using our simple row reader.
+      ServerStream<ReadRowsResponse> stream = readClient.readRowsCallable().call(readRowsRequest);
+      for (ReadRowsResponse response : stream) {
+        Preconditions.checkState(response.hasArrowRecordBatch());
+        reader.processRows(
+            response.getArrowRecordBatch(),
+            new ArrowRangeBatchConsumer(
+                RANGE_TEST_VALUES_EXPECTED_DATES,
+                RANGE_TEST_VALUES_EXPECTED_DATETIME,
+                RANGE_TEST_VALUES_EXPECTED_TIMESTAMP));
+        rowCount += response.getRowCount();
+      }
+      assertEquals(RANGE_TEST_VALUES_DATES.size(), rowCount);
+    }
+  }
+
+  // Tests that inputs for micros and picos can be read properly via Arrow
+  @Test
+  void timestamp_readArrow() throws IOException {
+    String table =
+        BigQueryResource.formatTableResource(projectName, DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE);
+    ReadSession session =
+        readClient.createReadSession(
+            parentProjectId,
+            ReadSession.newBuilder()
+                .setTable(table)
+                .setDataFormat(DataFormat.ARROW)
+                .setReadOptions(
+                    TableReadOptions.newBuilder()
+                        .setArrowSerializationOptions(
+                            ArrowSerializationOptions.newBuilder()
+                                // This serialization option only affects columns of type
+                                // `TIMESTAMP_PICOS`; it has no impact on other column types.
+                                .setPicosTimestampPrecision(
+                                    ArrowSerializationOptions.PicosTimestampPrecision
+                                        .TIMESTAMP_PRECISION_PICOS)
+                                .build())
+                        .build())
+                .build(),
+            MAX_STREAM_COUNT);
+    assertEquals(
+        MAX_STREAM_COUNT,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    // Assert that there are streams available in the session. An empty table may not have
+    // data available. If no sessions are available for an anonymous (cached) table, consider
+    // writing results of a query to a named table rather than consuming cached results
+    // directly.
+    assertThat(session.getStreamsCount()).isGreaterThan(0);
+
+    // Set up a simple reader and start a read session.
+    try (SimpleRowReaderArrow reader = new SimpleRowReaderArrow(session.getArrowSchema())) {
+      // Use the first stream to perform reading.
+      String streamName = session.getStreams(0).getName();
+      ReadRowsRequest readRowsRequest =
+          ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+      long rowCount = 0;
+      // Process each block of rows as they arrive and decode using our simple row reader.
+ ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + reader.processRows( + response.getArrowRecordBatch(), + new SimpleRowReaderArrow.ArrowTimestampBatchConsumer( + Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT)); + rowCount += response.getRowCount(); + } + assertEquals(Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT.length, rowCount); + } + } + + // Tests that inputs for micros and picos can be read properly via Avro + @Test + void timestamp_readAvro() throws IOException { + String table = + BigQueryResource.formatTableResource(projectName, DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE); + List rows = Helper.readAllRows(readClient, parentProjectId, table, null); + List timestamps = + rows.stream().map(x -> (Long) x.get(TIMESTAMP_COLUMN_NAME)).collect(Collectors.toList()); + List timestampHigherPrecision = + rows.stream() + .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString()) + .collect(Collectors.toList()); + for (int i = 0; i < timestamps.size(); i++) { + assertEquals(Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT[i][0], timestamps.get(i)); + } + for (int i = 0; i < timestampHigherPrecision.size(); i++) { + assertEquals( + Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT[i][1], + timestampHigherPrecision.get(i)); + } + } + + @Test + void testSimpleReadAndResume() { + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + readClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + long rowCount = readStreamToOffset(session.getStreams(0), /* rowOffset= */ 34_846); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder() + .setReadStream(session.getStreams(0).getName()) + .setOffset(rowCount) + .build(); + + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + // Verifies that the number of rows skipped and read equals to the total number of rows in the + // table. 
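+    // (Editor's aside, not part of the original test.) The resume pattern generalizes: given the
+    // number of rows already consumed from a stream, a fresh request with that row offset picks
+    // up exactly where the previous read stopped. Hypothetical sketch:
+    java.util.function.BiFunction<String, Long, ReadRowsRequest> resumeFrom =
+        (streamName, rowsAlreadyRead) ->
+            ReadRowsRequest.newBuilder()
+                .setReadStream(streamName)
+                .setOffset(rowsAlreadyRead) // a row index within the stream, not a byte offset
+                .build();
+    // The assertion below then checks that rows skipped plus rows read equals the table total.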
+ assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); + } + + @Test + void testFilter() throws IOException { + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + TableReadOptions options = + TableReadOptions.newBuilder().setRowRestriction("word_count > 100").build(); + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setMaxStreamCount(1) + .setReadSession( + ReadSession.newBuilder() + .setTable(table) + .setReadOptions(options) + .setDataFormat(DataFormat.AVRO) + .build()) + .build(); + + ReadSession session = readClient.createReadSession(request); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + SimpleRowReaderAvro reader = + new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema())); + + long rowCount = 0; + + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + reader.processRows( + response.getAvroRows(), + (AvroRowConsumer) + record -> { + String rowAssertMessage = + String.format("Row not matching expectations: %s", record.toString()); + + Long wordCount = (Long) record.get("word_count"); + assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L); + + Utf8 word = (Utf8) record.get("word"); + assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0); + }); + } + + assertEquals(1_333, rowCount); + } + + @Test + void testColumnSelection() throws IOException { + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + TableReadOptions options = + TableReadOptions.newBuilder() + .addSelectedFields("word") + .addSelectedFields("word_count") + .setRowRestriction("word_count > 100") + .build(); + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setMaxStreamCount(1) + .setReadSession( + ReadSession.newBuilder() + .setTable(table) + .setReadOptions(options) + .setDataFormat(DataFormat.AVRO) + .build()) + .build(); + + ReadSession session = readClient.createReadSession(request); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + Schema avroSchema = new Schema.Parser().parse(session.getAvroSchema().getSchema()); + + String actualSchemaMessage = + String.format( + "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); + assertEquals( + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); + assertEquals( + Schema.Type.LONG, + avroSchema.getField("word_count").schema().getType(), + actualSchemaMessage); + + SimpleRowReaderAvro reader = new SimpleRowReaderAvro(avroSchema); + + long rowCount = 0; + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + reader.processRows( + response.getAvroRows(), + (AvroRowConsumer) + record -> { + String rowAssertMessage = + String.format("Row not matching expectations: %s", record.toString()); + + Long wordCount = (Long) record.get("word_count"); + assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L); + + Utf8 word = (Utf8) record.get("word"); + assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0); + }); + } + + assertEquals(SHAKESPEARE_SAMPELS_ROWS_MORE_THAN_100_WORDS, rowCount); + } + + @Test + void testReadAtSnapshot() throws InterruptedException, IOException { + Field intFieldSchema = + Field.newBuilder("col", LegacySQLTypeName.INTEGER) + .setMode(Mode.REQUIRED) + .setDescription("IntegerDescription") + .build(); + com.google.cloud.bigquery.Schema tableSchema = + com.google.cloud.bigquery.Schema.of(intFieldSchema); + + TableId testTableId = TableId.of(/* dataset= */ DATASET, /* table= */ "test_read_snapshot"); + bigquery.create(TableInfo.of(testTableId, StandardTableDefinition.of(tableSchema))); + + Job firstJob = + runQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ testTableId, /* query= */ "SELECT 1 AS col"); + + Job secondJob = + runQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ testTableId, /* query= */ "SELECT 2 AS col"); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ projectName, + /* datasetId= */ DATASET, + /* tableId= */ testTableId.getTable()); + + final List rowsAfterFirstSnapshot = new ArrayList<>(); + Helper.processRowsAtSnapshot( + readClient, + parentProjectId, + /* table= */ table, + /* snapshotInMillis= */ firstJob.getStatistics().getEndTime(), + /* filter= */ null, + /* consumer= */ new AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + rowsAfterFirstSnapshot.add((Long) record.get("col")); + } + }); + assertEquals(Collections.singletonList(1L), rowsAfterFirstSnapshot); + + final List rowsAfterSecondSnapshot = new ArrayList<>(); + Helper.processRowsAtSnapshot( + readClient, + parentProjectId, + /* table= */ table, + /* snapshotInMillis= */ secondJob.getStatistics().getEndTime(), + /* filter= */ null, + /* consumer= */ new AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + rowsAfterSecondSnapshot.add((Long) record.get("col")); + } + }); + Collections.sort(rowsAfterSecondSnapshot); + assertEquals(Arrays.asList(1L, 2L), rowsAfterSecondSnapshot); + } + + @Test + void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { + String partitionedTableName = + "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s (num_field INT64, date_field DATE) " + + 
" PARTITION BY date_field " + + " OPTIONS( " + + " description=\"a table partitioned by date_field\" " + + " ) " + + "AS " + + " SELECT 1, CAST(\"2019-01-01\" AS DATE)" + + " UNION ALL" + + " SELECT 2, CAST(\"2019-01-02\" AS DATE)" + + " UNION ALL" + + " SELECT 3, CAST(\"2019-01-03\" AS DATE)", + DATASET, partitionedTableName); + + runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ projectName, + /* datasetId= */ DATASET, + /* tableId= */ partitionedTableName); + + List unfilteredRows = + Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null); + assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); + + List partitionFilteredRows = + Helper.readAllRows( + readClient, + parentProjectId, + /* table= */ table, + /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)"); + assertEquals( + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); + assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); + } + + @Test + void testIngestionTimePartitionedTable() throws InterruptedException, IOException { + Field intFieldSchema = + Field.newBuilder("num_field", LegacySQLTypeName.INTEGER) + .setMode(Mode.REQUIRED) + .setDescription("IntegerDescription") + .build(); + com.google.cloud.bigquery.Schema tableSchema = + com.google.cloud.bigquery.Schema.of(intFieldSchema); + + TableId testTableId = + TableId.of(/* dataset= */ DATASET, /* table= */ "test_date_partitioned_table"); + bigquery.create( + TableInfo.of( + testTableId, + StandardTableDefinition.newBuilder() + .setTimePartitioning(TimePartitioning.of(TimePartitioning.Type.DAY)) + .setSchema(tableSchema) + .build())); + + // Simulate ingestion for 2019-01-01. + runQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ TableId.of( + /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190101"), + /* query= */ "SELECT 1 AS num_field"); + + // Simulate ingestion for 2019-01-02. 
+    runQueryAppendJobAndExpectSuccess(
+        /* destinationTableId= */ TableId.of(
+            /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190102"),
+        /* query= */ "SELECT 2 AS num_field");
+
+    String table =
+        BigQueryResource.formatTableResource(
+            /* projectId= */ projectName,
+            /* datasetId= */ testTableId.getDataset(),
+            /* tableId= */ testTableId.getTable());
+
+    List<GenericData.Record> unfilteredRows =
+        Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null);
+    assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString());
+
+    List<GenericData.Record> partitionFilteredRows =
+        Helper.readAllRows(
+            readClient,
+            parentProjectId,
+            /* table= */ table,
+            /* filter= */ "_PARTITIONDATE > \"2019-01-01\"");
+    assertEquals(
+        1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString());
+    assertEquals(2L, partitionFilteredRows.get(0).get("num_field"));
+  }
+
+  @Test
+  void testBasicSqlTypes() throws InterruptedException, IOException {
+    String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s "
+                + " (int_field INT64 NOT NULL,"
+                + " num_field NUMERIC NOT NULL,"
+                + " float_field FLOAT64 NOT NULL,"
+                + " bool_field BOOL NOT NULL,"
+                + " str_field STRING NOT NULL,"
+                + " bytes_field BYTES NOT NULL) "
+                + " OPTIONS( "
+                + " description=\"a table with basic column types\" "
+                + " ) "
+                + "AS "
+                + " SELECT "
+                + " 17,"
+                + " CAST(1234.56 AS NUMERIC),"
+                + " 6.547678,"
+                + " TRUE,"
+                + " \"String field value\","
+                + " b\"абвгд\"",
+            DATASET, tableName);
+
+    runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.formatTableResource(
+            /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName);
+
+    List<GenericData.Record> rows =
+        Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); + + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage); + + assertEquals( + Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage); + assertEquals(17L, (long) record.get("int_field"), rowAssertMessage); + + assertEquals( + Schema.Type.BYTES, + avroSchema.getField("num_field").schema().getType(), + actualSchemaMessage); + assertEquals( + LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9), + avroSchema.getField("num_field").schema().getLogicalType(), + actualSchemaMessage); + BigDecimal actual_num_field = + new Conversions.DecimalConversion() + .fromBytes( + (ByteBuffer) record.get("num_field"), + avroSchema, + avroSchema.getField("num_field").schema().getLogicalType()); + assertEquals( + BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9), + actual_num_field, + rowAssertMessage); + + assertEquals( + Schema.Type.DOUBLE, + avroSchema.getField("float_field").schema().getType(), + actualSchemaMessage); + assertEquals(6.547678d, (double) record.get("float_field"), 0.0001, rowAssertMessage); + + assertEquals( + Schema.Type.BOOLEAN, + avroSchema.getField("bool_field").schema().getType(), + actualSchemaMessage); + assertEquals(true, record.get("bool_field"), rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + avroSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage); + + assertEquals( + Schema.Type.BYTES, + avroSchema.getField("bytes_field").schema().getType(), + actualSchemaMessage); + assertArrayEquals( + Utf8.getBytesFor("абвгд"), + ((ByteBuffer) (record.get("bytes_field"))).array(), + rowAssertMessage); + } + + @Test + void testDateAndTimeSqlTypes() throws InterruptedException, IOException { + String tableName = + "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s " + + " (date_field DATE NOT NULL," + + " datetime_field DATETIME NOT NULL," + + " time_field TIME NOT NULL," + + " timestamp_field TIMESTAMP NOT NULL)" + + " OPTIONS( " + + " description=\"a table with date and time column types\" " + + " ) " + + "AS " + + " SELECT " + + " CAST(\"2019-05-31\" AS DATE)," + + " CAST(\"2019-04-30 21:47:59.999999\" AS DATETIME)," + + " CAST(\"21:47:59.999999\" AS TIME)," + + " CAST(\"2019-04-30 19:24:19.123456 UTC\" AS TIMESTAMP)", + DATASET, tableName); + + runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName); + + List rows = + Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); + + GenericData.Record record = rows.get(0); + Schema avroSchema = record.getSchema(); + + String actualSchemaMessage = + String.format( + "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); + + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage); + + assertEquals( + Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage); + assertEquals( + LogicalTypes.date(), + avroSchema.getField("date_field").schema().getLogicalType(), + actualSchemaMessage); + assertEquals( + LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31), + LocalDate.ofEpochDay((int) record.get("date_field")), + rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + avroSchema.getField("datetime_field").schema().getType(), + actualSchemaMessage); + assertEquals( + "datetime", + avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"), + actualSchemaMessage); + assertEquals( + new Utf8("2019-04-30T21:47:59.999999"), record.get("datetime_field"), rowAssertMessage); + + assertEquals( + Schema.Type.LONG, + avroSchema.getField("time_field").schema().getType(), + actualSchemaMessage); + assertEquals( + LogicalTypes.timeMicros(), + avroSchema.getField("time_field").schema().getLogicalType(), + actualSchemaMessage); + assertEquals( + LocalTime.of( + /* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000), + LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")), + rowAssertMessage); + + assertEquals( + Schema.Type.LONG, + avroSchema.getField("timestamp_field").schema().getType(), + actualSchemaMessage); + assertEquals( + LogicalTypes.timestampMicros(), + avroSchema.getField("timestamp_field").schema().getLogicalType(), + actualSchemaMessage); + ZonedDateTime expected_timestamp = + ZonedDateTime.parse( + "2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC)) + .withNano(123_456_000); + long actual_timestamp_micros = (long) record.get("timestamp_field"); + ZonedDateTime actual_timestamp = + ZonedDateTime.ofInstant( + Instant.ofEpochSecond( + /* epochSecond= */ actual_timestamp_micros / 1_000_000, + (actual_timestamp_micros % 1_000_000) * 1_000), + ZoneOffset.UTC); + assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage); + } + + @Test + void testGeographySqlType() throws InterruptedException, IOException { + String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s " + + " (geo_field GEOGRAPHY NOT NULL)" + + " OPTIONS( " + + " description=\"a table with a geography column type\" " + + " ) " + + "AS " + + " SELECT ST_GEOGPOINT(1.1, 2.2)", + DATASET, tableName); + + runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName); + + List rows = + Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); + + GenericData.Record record = rows.get(0); + Schema avroSchema = record.getSchema(); + + String actualSchemaMessage = + String.format( + "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.STRING,
+        avroSchema.getField("geo_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        "GEOGRAPHY",
+        avroSchema.getField("geo_field").schema().getObjectProp("sqlType"),
+        actualSchemaMessage);
+    assertEquals(new Utf8("POINT(1.1 2.2)"), record.get("geo_field"), rowAssertMessage);
+  }
+
+  @Test
+  void testStructAndArraySqlTypes() throws InterruptedException, IOException {
+    String tableName =
+        "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s (array_field ARRAY<INT64>, struct_field STRUCT<int_field"
+                + " INT64, str_field STRING> NOT NULL) OPTIONS( description=\"a table with"
+                + " array and struct column types\" ) AS SELECT [1, 2, 3], "
+                + " (10, 'abc')",
+            DATASET, tableName);
+
+    runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.formatTableResource(
+            /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName);
+
+    List<GenericData.Record> rows =
+        Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.ARRAY,
+        avroSchema.getField("array_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        Schema.Type.LONG,
+        avroSchema.getField("array_field").schema().getElementType().getType(),
+        actualSchemaMessage);
+    assertArrayEquals(
+        new Long[] {1L, 2L, 3L},
+        ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]),
+        rowAssertMessage);
+
+    // Validate the STRUCT field and its members.
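+    // (Editor's aside, not part of the original test.) In the Avro mapping a STRUCT arrives as a
+    // nested GenericData.Record, so members are reached with a second get() call, as the
+    // assertions below do explicitly. Hypothetical helper sketch:
+    java.util.function.BiFunction<GenericData.Record, String, Object> structMember =
+        (row, member) -> ((GenericData.Record) row.get("struct_field")).get(member);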
+ Schema structSchema = avroSchema.getField("struct_field").schema(); + assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage); + GenericData.Record structRecord = (GenericData.Record) record.get("struct_field"); + + assertEquals( + Schema.Type.LONG, + structSchema.getField("int_field").schema().getType(), + actualSchemaMessage); + assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + structSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage); + } + + @Test + void testSimpleReadWithBackgroundExecutorProvider() throws IOException { + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder() + .setBackgroundExecutorProvider( + InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(14).build()) + .build(); + // Overriding the default client + readClient = BigQueryReadClient.create(bigQueryReadSettings); + assertTrue( + readClient.getStub().getStubSettings().getBackgroundExecutorProvider() + instanceof InstantiatingExecutorProvider); + assertEquals( + 14, + ((InstantiatingExecutorProvider) + readClient.getStub().getStubSettings().getBackgroundExecutorProvider()) + .getExecutorThreadCount()); + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + readClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + long rowCount = 0; + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); + } + + @Test + void testUniverseDomainWithInvalidUniverseDomain() throws IOException { + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider( + FixedCredentialsProvider.create( + Helper.loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN))) + .setUniverseDomain("invalid.domain") + .build(); + BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1), + "RPCs to invalid universe domain should fail"); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); + localClient.close(); + } + + @Test + void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder() + 
.setCredentialsProvider( + FixedCredentialsProvider.create( + Helper.loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN))) + .setUniverseDomain("invalid.domain") + .build(); + BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1), + "RPCs to invalid universe domain should fail"); + assertTrue( + e.getMessage().contains("does not match the universe domain found in the credentials")); + localClient.close(); + } + + @Test + void testUniverseDomainWithMatchingDomain() throws IOException { + // Test a valid domain using the default credentials and Google default universe domain. + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder().setUniverseDomain("googleapis.com").build(); + BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + long rowCount = 0; + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); + localClient.close(); + } + + @Test + void testSimpleReadWithOtelTracing() throws IOException { + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(new TestSpanExporter())) + .setSampler(Sampler.alwaysOn()) + .build(); + OpenTelemetry otel = OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build(); + + BigQueryReadSettings otelSettings = + BigQueryReadSettings.newBuilder() + .setEnableOpenTelemetryTracing(true) + .setOpenTelemetryTracerProvider(tracerProvider) + .build(); + BigQueryReadClient otelClient = BigQueryReadClient.create(otelSettings); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + otelClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + ServerStream stream = otelClient.readRowsCallable().call(readRowsRequest); + + assertNotNull( + OTEL_ATTRIBUTES.get("com.google.cloud.bigquery.storage.v1.read.createReadSession")); + assertNotNull( + OTEL_ATTRIBUTES.get("com.google.cloud.bigquery.storage.v1.read.createReadSessionCallable")); + assertNotNull( + OTEL_ATTRIBUTES.get( + 
"com.google.cloud.bigquery.storage.v1.read.stub.createReadSessionCallable")); + assertNotNull( + OTEL_ATTRIBUTES.get("com.google.cloud.bigquery.storage.v1.read.readRowsCallable")); + assertNotNull( + OTEL_ATTRIBUTES.get("com.google.cloud.bigquery.storage.v1.read.stub.readRowsCallable")); + + // createReadSession is the parent span of createReadSessionCallable + assertEquals( + "com.google.cloud.bigquery.storage.v1.read.createReadSession", + OTEL_SPAN_IDS_TO_NAMES.get( + OTEL_PARENT_SPAN_IDS.get( + "com.google.cloud.bigquery.storage.v1.read.createReadSessionCallable"))); + + Map, Object> createReadSessionMap = + OTEL_ATTRIBUTES.get("com.google.cloud.bigquery.storage.v1.read.createReadSession"); + assertNotNull(createReadSessionMap); + assertNotNull( + createReadSessionMap.get( + AttributeKey.longKey("bq.storage.read_session.request.max_stream_count"))); + assertEquals( + 1L, + createReadSessionMap.get( + AttributeKey.longKey("bq.storage.read_session.request.max_stream_count"))); + } + + void testUniverseDomain() throws IOException { + // This test is not yet part presubmit integration test as it requires the apis-tpclp.goog + // universe domain credentials. + // Test a valid domain using the default credentials and Google default universe domain. + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build(); + BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); + + String table = + BigQueryResource.formatTableResource( + /* projectId= */ "google-tpc-testing-environment:cloudsdk-test-project", + /* datasetId= */ "tpc_demo_dataset", + /* tableId= */ "new_table"); + + ReadSession session = + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + long rowCount = 0; + ServerStream stream = localClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + assertEquals(1, rowCount); + localClient.close(); + } + + /** + * Reads to the specified row offset within the stream. If the stream does not have the desired + * rows to read, it will read all of them. + * + * @param readStream + * @param rowOffset + * @return the number of requested rows to skip or the total rows read if stream had less rows. + */ + private long readStreamToOffset(ReadStream readStream, long rowOffset) { + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(readStream.getName()).build(); + + long rowCount = 0; + ServerStream serverStream = + readClient.readRowsCallable().call(readRowsRequest); + + for (ReadRowsResponse response : serverStream) { + rowCount += response.getRowCount(); + if (rowCount >= rowOffset) { + return rowOffset; + } + } + + return rowCount; + } + + /** + * Runs a query job with WRITE_APPEND disposition to the destination table and returns the + * successfully completed job. 
+   *
+   * @param destinationTableId the table the query results are appended to
+   * @param query the query to run
+   * @return the successfully completed query job
+   * @throws InterruptedException if interrupted while waiting for the job to complete
+   */
+  private Job runQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query)
+      throws InterruptedException {
+    return runQueryJobAndExpectSuccess(
+        QueryJobConfiguration.newBuilder(query)
+            .setDestinationTable(destinationTableId)
+            .setUseQueryCache(false)
+            .setUseLegacySql(false)
+            .setWriteDisposition(WriteDisposition.WRITE_APPEND)
+            .build());
+  }
+
+  /**
+   * Runs a query job with provided configuration and returns the successfully completed job.
+   *
+   * @param configuration the configuration of the query job to run
+   * @return the successfully completed job
+   * @throws InterruptedException if interrupted while waiting for the job to complete
+   */
+  private Job runQueryJobAndExpectSuccess(QueryJobConfiguration configuration)
+      throws InterruptedException {
+    Job job = bigquery.create(JobInfo.of(configuration));
+    Job completedJob =
+        job.waitFor(
+            RetryOption.initialRetryDelayDuration(Duration.ofSeconds(1)),
+            RetryOption.totalTimeoutDuration(Duration.ofMinutes(1)));
+
+    assertNotNull(completedJob);
+    assertNull(
+        /* object= */ completedJob.getStatus().getError(),
+        /* message= */ "Received a job status that is not a success: "
+            + completedJob.getStatus().toString());
+
+    return completedJob;
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java
new file mode 100644
index 000000000000..7f35901d74e9
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java
@@ -0,0 +1,2587 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1.it; + +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import com.google.api.client.util.Sleeper; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.api.gax.rpc.HeaderProvider; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.*; +import com.google.cloud.bigquery.Field.Mode; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.storage.test.Test.*; +import com.google.cloud.bigquery.storage.test.TestOptional.*; +import com.google.cloud.bigquery.storage.v1.*; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.OffsetAlreadyExists; +import com.google.cloud.bigquery.storage.v1.Exceptions.OffsetOutOfRange; +import com.google.cloud.bigquery.storage.v1.Exceptions.SchemaMismatchedException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StreamFinalizedException; +import com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource; +import com.google.cloud.bigquery.storage.v1.it.util.Helper; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.protobuf.DescriptorProtos; +import com.google.protobuf.DescriptorProtos.DescriptorProto; +import com.google.protobuf.DescriptorProtos.FieldDescriptorProto; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Message; +import io.grpc.Status; +import io.grpc.Status.Code; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.math.BigDecimal; +import java.nio.channels.Channels; +import java.sql.Timestamp; +import java.text.ParseException; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.temporal.ChronoUnit; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.*; +import org.apache.arrow.vector.compression.CompressionCodec; +import org.apache.arrow.vector.compression.CompressionUtil; +import 
+import org.apache.arrow.vector.ipc.WriteChannel;
+import org.apache.arrow.vector.ipc.message.MessageSerializer;
+import org.apache.arrow.vector.types.pojo.ArrowType;
+import org.apache.arrow.vector.types.pojo.FieldType;
+import org.apache.avro.generic.GenericData;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.parallel.Execution;
+import org.junit.jupiter.api.parallel.ExecutionMode;
+
+/** Integration tests for BigQuery Write API. */
+@Execution(ExecutionMode.SAME_THREAD)
+class ITBigQueryStorageWriteClientTest {
+  private static final Logger LOG =
+      Logger.getLogger(ITBigQueryStorageWriteClientTest.class.getName());
+  private static final String DATASET = RemoteBigQueryHelper.generateDatasetName();
+  private static final String DATASET_EU = RemoteBigQueryHelper.generateDatasetName();
+  private static final String TABLE = "testtable";
+  private static final String TABLE2 = "complicatedtable";
+
+  private static final String TEST_TRACE_ID = "DATAFLOW:job_id";
+
+  private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset";
+
+  private static BigQueryReadClient readClient;
+  private static BigQueryWriteClient writeClient;
+  private static String parentProjectId;
+  private static TableInfo tableInfo;
+  private static TableInfo tableInfo2;
+
+  private static TableInfo tableInfoEU;
+
+  private static TableDefinition defaultValueTableDefinition;
+  private static String tableId;
+  private static String tableId2;
+
+  private static String tableIdEU;
+  private static BigQuery bigquery;
+
+  private static final BufferAllocator allocator = new RootAllocator();
+
+  // Arrow is a bit special in that timestamps are limited to nanosecond precision.
+  // The data will be padded to fit into the higher-precision columns.
+  private static final Object[][] INPUT_ARROW_WRITE_TIMESTAMPS =
+      new Object[][] {
+        {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, 1735734896123456789L},
+        {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, 1580646896123456789L},
+        {636467696123456L /* 1990-03-03T12:34:56.123456Z */, 636467696123456789L},
+        {165846896123456L /* 1975-04-04T12:34:56.123456Z */, 165846896123456789L}
+      };
+
+  // Arrow's higher-precision column is padded with extra 0s if configured to return
+  // ISO output for any picosecond-enabled column.
+  private static final Object[][] EXPECTED_ARROW_WRITE_TIMESTAMPS_ISO_OUTPUT =
+      new Object[][] {
+        {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789000Z"},
+        {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789000Z"},
+        {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789000Z"},
+        {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789000Z"}
+      };
+
+  // Special case where users can use the Write API with Protobuf messages.
+  // The format is two fields: 1. seconds from epoch and 2. a sub-second fraction (millis, micros,
+  // nanos, or picos). This test case uses a picosecond sub-second fraction.
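+  // For example, the first row below, {1735734896L, 123456789123L}, decodes to
+  // 2025-01-01T12:34:56.123456789123Z.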
+  private static final Long[][] INPUT_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS =
+      new Long[][] {
+        {1735734896L, 123456789123L}, /* 2025-01-01T12:34:56.123456789123Z */
+        {1580646896L, 123456789123L}, /* 2020-02-02T12:34:56.123456789123Z */
+        {636467696L, 123456789123L}, /* 1990-03-03T12:34:56.123456789123Z */
+        {165846896L, 123456789123L} /* 1975-04-04T12:34:56.123456789123Z */
+      };
+
+  // Expected ISO8601 output when using proto descriptors to write to BQ with pico precision
+  private static final String[]
+      EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT =
+          new String[] {
+            "2025-01-01T12:34:56.123456789123Z",
+            "2020-02-02T12:34:56.123456789123Z",
+            "1990-03-03T12:34:56.123456789123Z",
+            "1975-04-04T12:34:56.123456789123Z"
+          };
+
+  static class StringWithSecondsNanos {
+    public String foo;
+    public long seconds;
+    public int nanos;
+
+    public StringWithSecondsNanos(String fooParam, long secondsParam, int nanosParam) {
+      foo = fooParam;
+      seconds = secondsParam;
+      nanos = nanosParam;
+    }
+  }
+
+  private static final HeaderProvider USER_AGENT_HEADER_PROVIDER =
+      FixedHeaderProvider.create("User-Agent", "my_product_name/1.0 (GPN:Samples;test)");
+
+  @BeforeAll
+  static void beforeAll() throws IOException {
+    readClient = BigQueryReadClient.create();
+
+    BigQueryWriteSettings settings =
+        BigQueryWriteSettings.newBuilder().setHeaderProvider(USER_AGENT_HEADER_PROVIDER).build();
+    writeClient = BigQueryWriteClient.create(settings);
+    parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId());
+
+    RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
+    bigquery = bigqueryHelper.getOptions().getService();
+    DatasetInfo datasetInfo =
+        DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build();
+    bigquery.create(datasetInfo);
+    LOG.info("Created test dataset: " + DATASET);
+    tableInfo =
+        TableInfo.newBuilder(
+                TableId.of(DATASET, TABLE),
+                StandardTableDefinition.of(
+                    Schema.of(
+                        com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING)
+                            .setMode(Field.Mode.NULLABLE)
+                            .build())))
+            .build();
+    com.google.cloud.bigquery.Field.Builder innerTypeFieldBuilder =
+        com.google.cloud.bigquery.Field.newBuilder(
+            "inner_type",
+            LegacySQLTypeName.RECORD,
+            com.google.cloud.bigquery.Field.newBuilder("value", LegacySQLTypeName.STRING)
+                .setMode(Field.Mode.REPEATED)
+                .build());
+
+    tableInfo2 =
+        TableInfo.newBuilder(
+                TableId.of(DATASET, TABLE2),
+                StandardTableDefinition.of(
+                    Schema.of(
+                        Field.newBuilder(
+                                "nested_repeated_type",
+                                LegacySQLTypeName.RECORD,
+                                innerTypeFieldBuilder.setMode(Field.Mode.REPEATED).build())
+                            .setMode(Field.Mode.REPEATED)
+                            .build(),
+                        innerTypeFieldBuilder.setMode(Field.Mode.NULLABLE).build())))
+            .build();
+
+    defaultValueTableDefinition =
+        StandardTableDefinition.of(
+            Schema.of(
+                com.google.cloud.bigquery.Field.newBuilder(
+                        "foo_with_default", LegacySQLTypeName.STRING)
+                    .setDefaultValueExpression("'default_value_for_test'")
+                    .setMode(Field.Mode.NULLABLE)
+                    .build(),
+                com.google.cloud.bigquery.Field.newBuilder(
+                        "bar_without_default", LegacySQLTypeName.STRING)
+                    .setMode(Mode.NULLABLE)
+                    .build(),
+                com.google.cloud.bigquery.Field.newBuilder(
+                        "date_with_default_to_current", LegacySQLTypeName.TIMESTAMP)
+                    .setDefaultValueExpression("CURRENT_TIMESTAMP()")
+                    .setMode(Mode.NULLABLE)
+                    .build()));
+    bigquery.create(tableInfo);
+    bigquery.create(tableInfo2);
+    tableId =
"projects/%s/datasets/%s/tables/%s", + ServiceOptions.getDefaultProjectId(), DATASET, TABLE); + tableId2 = + String.format( + "projects/%s/datasets/%s/tables/%s", + ServiceOptions.getDefaultProjectId(), DATASET, TABLE2); + DatasetInfo datasetInfoEU = + DatasetInfo.newBuilder(/* datasetId= */ DATASET_EU) + .setLocation("EU") + .setDescription(DESCRIPTION) + .build(); + bigquery.create(datasetInfoEU); + tableInfoEU = + TableInfo.newBuilder( + TableId.of(DATASET_EU, TABLE), + StandardTableDefinition.of( + Schema.of( + com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING) + .build()))) + .build(); + tableIdEU = + String.format( + "projects/%s/datasets/%s/tables/%s", + ServiceOptions.getDefaultProjectId(), DATASET_EU, TABLE); + bigquery.create(tableInfoEU); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (writeClient != null) { + writeClient.close(); + writeClient.awaitTermination(10, TimeUnit.SECONDS); + } + + if (readClient != null) { + readClient.close(); + readClient.awaitTermination(10, TimeUnit.SECONDS); + } + + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + RemoteBigQueryHelper.forceDelete(bigquery, DATASET_EU); + } + } + + ProtoRows createProtoRows(String[] messages) { + ProtoRows.Builder rows = ProtoRows.newBuilder(); + for (String message : messages) { + FooType foo = FooType.newBuilder().setFoo(message).build(); + rows.addSerializedRows(foo.toByteString()); + } + return rows.build(); + } + + ProtoSchema createProtoSchemaWithColField() { + return ProtoSchema.newBuilder() + .setProtoDescriptor( + DescriptorProto.newBuilder() + .setName("testProto") + .addField( + FieldDescriptorProto.newBuilder() + .setName("col1") + .setNumber(1) + .setType(FieldDescriptorProto.Type.TYPE_STRING) + .build()) + .build()) + .build(); + } + + ProtoRows createProtoOptionalRows(String[] messages) { + ProtoRows.Builder rows = ProtoRows.newBuilder(); + for (String message : messages) { + FooOptionalType foo = FooOptionalType.newBuilder().setFoo(message).build(); + rows.addSerializedRows(foo.toByteString()); + } + return rows.build(); + } + + ProtoRows createProtoRowsMultipleColumns(String[] messages) { + ProtoRows.Builder rows = ProtoRows.newBuilder(); + for (String message : messages) { + UpdatedFooType foo = UpdatedFooType.newBuilder().setFoo(message).setBar(message).build(); + rows.addSerializedRows(foo.toByteString()); + } + return rows.build(); + } + + ProtoRows createProtoRowsComplex(String[] messages) { + ProtoRows.Builder rows = ProtoRows.newBuilder(); + for (String message : messages) { + ComplicateType foo = + ComplicateType.newBuilder() + .setInnerType(InnerType.newBuilder().addValue(message).addValue(message).build()) + .build(); + rows.addSerializedRows(foo.toByteString()); + } + return rows.build(); + } + + ProtoRows createProtoRowsMixed(StringWithSecondsNanos[] messages) { + ProtoRows.Builder rows = ProtoRows.newBuilder(); + for (StringWithSecondsNanos message : messages) { + FooTimestampType datum = + FooTimestampType.newBuilder() + .setFoo(message.foo) + .setBar( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(message.seconds) + .setNanos(message.nanos) + .build()) + .build(); + rows.addSerializedRows(datum.toByteString()); + } + return rows.build(); + } + + @Test + void testBatchWriteWithCommittedStreamEU() + throws IOException, InterruptedException, ExecutionException { + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(tableIdEU) + 
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    ApiFuture<AppendRowsResponse> response1;
+    ApiFuture<AppendRowsResponse> response2;
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(writeStream.getName())
+            .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
+            .build()) {
+      LOG.info("Sending one message");
+
+      ApiFuture<AppendRowsResponse> response =
+          streamWriter.append(createProtoRows(new String[] {"aaa"}), 0);
+      assertEquals(0, response.get().getAppendResult().getOffset().getValue());
+
+      LOG.info("Sending two more messages");
+      response1 = streamWriter.append(createProtoRows(new String[] {"bbb", "ccc"}), 1);
+      response2 = streamWriter.append(createProtoRows(new String[] {"ddd"}), 3);
+    }
+    assertEquals(1, response1.get().getAppendResult().getOffset().getValue());
+    assertEquals(3, response2.get().getAppendResult().getOffset().getValue());
+  }
+
+  @Test
+  void testProto3OptionalBatchWriteWithCommittedStream()
+      throws IOException, InterruptedException, ExecutionException {
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(tableId)
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    ApiFuture<AppendRowsResponse> response1;
+    ApiFuture<AppendRowsResponse> response2;
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(writeStream.getName())
+            .setWriterSchema(ProtoSchemaConverter.convert(FooOptionalType.getDescriptor()))
+            .build()) {
+      LOG.info("Sending one message");
+
+      ApiFuture<AppendRowsResponse> response =
+          streamWriter.append(createProtoOptionalRows(new String[] {"aaa"}), 0);
+      assertEquals(0, response.get().getAppendResult().getOffset().getValue());
+
+      LOG.info("Sending two more messages");
+      response1 = streamWriter.append(createProtoOptionalRows(new String[] {"bbb", "ccc"}), 1);
+      response2 = streamWriter.append(createProtoOptionalRows(new String[] {""}), 3);
+    }
+    assertEquals(1, response1.get().getAppendResult().getOffset().getValue());
+    assertEquals(3, response2.get().getAppendResult().getOffset().getValue());
+  }
+
+  @Test
+  void testJsonStreamWriterCommittedStream()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException {
+    String tableName = "JsonTable";
+    TableInfo tableInfo =
+        TableInfo.newBuilder(
+                TableId.of(DATASET, tableName),
+                StandardTableDefinition.of(
+                    Schema.of(
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_str", StandardSQLTypeName.STRING)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_numerics", StandardSQLTypeName.NUMERIC)
+                            .setMode(Field.Mode.REPEATED)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_datetime", StandardSQLTypeName.DATETIME)
+                            .build())))
+            .build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()).build()) {
+      LOG.info("Sending one message");
+      JSONObject row1 = new JSONObject();
+      row1.put("test_str", "aaa");
+      row1.put(
+          "test_numerics",
+          new JSONArray(
+              new byte[][] {
+                BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("123.4"))
+                    .toByteArray(),
+                BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("-9000000"))
+                    .toByteArray()
+              }));
+      row1.put(
+          "test_datetime",
+          CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime(
+              LocalDateTime.of(2020, 10, 1, 12, 0)));
+      JSONArray jsonArr1 = new JSONArray(new JSONObject[] {row1});
+
+      ApiFuture<AppendRowsResponse> response1 = jsonStreamWriter.append(jsonArr1, -1);
+
+      assertEquals(0, response1.get().getAppendResult().getOffset().getValue());
+
+      // row1 has already been serialized by the append above, so it is safe to reuse it here
+      // with a new value.
+      row1.put("test_str", "bbb");
+      JSONObject row2 = new JSONObject();
+      row2.put("test_str", "ccc");
+      JSONArray jsonArr2 = new JSONArray();
+      jsonArr2.put(row1);
+      jsonArr2.put(row2);
+
+      JSONObject row4 = new JSONObject();
+      row4.put("test_str", "ddd");
+      JSONArray jsonArr3 = new JSONArray();
+      jsonArr3.put(row4);
+
+      LOG.info("Sending two more messages");
+      ApiFuture<AppendRowsResponse> response2 = jsonStreamWriter.append(jsonArr2, -1);
+      LOG.info("Sending one more message");
+      ApiFuture<AppendRowsResponse> response3 = jsonStreamWriter.append(jsonArr3, -1);
+      assertEquals(1, response2.get().getAppendResult().getOffset().getValue());
+      assertEquals(3, response3.get().getAppendResult().getOffset().getValue());
+
+      TableResult result =
+          bigquery.listTableData(
+              tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+      Iterator<FieldValueList> iter = result.getValues().iterator();
+      FieldValueList currentRow = iter.next();
+      assertEquals("aaa", currentRow.get(0).getStringValue());
+      assertEquals("-9000000", currentRow.get(1).getRepeatedValue().get(1).getStringValue());
+      assertEquals("2020-10-01T12:00:00", currentRow.get(2).getStringValue());
+      assertEquals("bbb", iter.next().get(0).getStringValue());
+      assertEquals("ccc", iter.next().get(0).getStringValue());
+      assertEquals("ddd", iter.next().get(0).getStringValue());
+      assertFalse(iter.hasNext());
+    }
+  }
+
+  @Test
+  void testRowErrors()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException {
+    String tableName = "TestBadRowsTable";
+    TableInfo tableInfo =
+        TableInfo.newBuilder(
+                TableId.of(DATASET, tableName),
+                StandardTableDefinition.of(
+                    Schema.of(
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "foo", StandardSQLTypeName.STRING)
+                            .setMaxLength(10L)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "bar", StandardSQLTypeName.TIMESTAMP)
+                            .build())))
+            .build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    ApiFuture<AppendRowsResponse> futureResponse1;
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(parent.toString() + "/_default")
+            .setWriterSchema(ProtoSchemaConverter.convert(FooTimestampType.getDescriptor()))
+            .build()) {
+
+      LOG.info("Sending three messages");
+      StringWithSecondsNanos[] myBadList = {
+        new StringWithSecondsNanos("aaabbbcccddd", 1663821424, 0),
+        new StringWithSecondsNanos("bbb", Long.MIN_VALUE, 0),
+        new StringWithSecondsNanos("cccdddeeefffggg", 1663621424, 0)
+      };
+      ApiFuture<AppendRowsResponse> futureResponse =
+          streamWriter.append(createProtoRowsMixed(myBadList), -1);
+      AppendRowsResponse actualResponse = null;
+      try {
+        actualResponse = futureResponse.get();
+      } catch (Throwable t) {
+        assertTrue(t instanceof ExecutionException);
+        t = t.getCause();
+        assertTrue(t instanceof AppendSerializationError);
+        AppendSerializationError e = (AppendSerializationError) t;
+        LOG.info("Found row errors on stream: " + e.getStreamName());
+        assertEquals(
+            "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field"
+                + " foo.",
+            e.getRowIndexToErrorMessage().get(0));
+        assertEquals(
+            "Timestamp field value is out of range: -9223372036854775808 on field bar.",
+            e.getRowIndexToErrorMessage().get(1));
+        assertEquals(
+            "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field"
+                + " foo.",
+            e.getRowIndexToErrorMessage().get(2));
+        for (Map.Entry<Integer, String> entry : e.getRowIndexToErrorMessage().entrySet()) {
+          LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue());
+        }
+      }
+      assertNull(actualResponse);
+
+      LOG.info("Resending with three good messages");
+      StringWithSecondsNanos[] myGoodList = {
+        new StringWithSecondsNanos("aaa", 1664821424, 0),
+        new StringWithSecondsNanos("bbb", 1663821424, 0),
+        new StringWithSecondsNanos("ccc", 1664801424, 0)
+      };
+      futureResponse1 = streamWriter.append(createProtoRowsMixed(myGoodList), -1);
+    }
+    assertEquals(0, futureResponse1.get().getAppendResult().getOffset().getValue());
+
+    TableResult result =
+        bigquery.listTableData(tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+    for (FieldValueList currentRow : result.getValues()) {
+      LOG.info("Table row contains " + currentRow.size() + " field values.");
+      LOG.info("Table column has foo: " + currentRow.get(0).getStringValue());
+      LOG.info("Table column has bar: " + currentRow.get(1).getTimestampValue());
+    }
+
+    Iterator<FieldValueList> iter = result.getValues().iterator();
+    FieldValueList currentRow = iter.next();
+    assertEquals("aaa", currentRow.get(0).getStringValue());
+    assertEquals(1664821424000000L, currentRow.get(1).getTimestampValue());
+    currentRow = iter.next();
+    assertEquals("bbb", currentRow.get(0).getStringValue());
+    assertEquals(1663821424000000L, currentRow.get(1).getTimestampValue());
+    currentRow = iter.next();
+    assertEquals("ccc", currentRow.get(0).getStringValue());
+    assertEquals(1664801424000000L, currentRow.get(1).getTimestampValue());
+    assertFalse(iter.hasNext());
+  }
+
+  @Test
+  void testRequestProfilerWithCommittedStream()
+      throws DescriptorValidationException, IOException, InterruptedException {
+    String tableName = "TestProfiler";
+    TableId tableId = TableId.of(DATASET, tableName);
+    Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
+    Schema schema = Schema.of(col1);
+    TableInfo tableInfo = TableInfo.newBuilder(tableId, StandardTableDefinition.of(schema)).build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    int totalRequest = 50;
+    int rowBatch = 1200;
+    ArrayList<ApiFuture<AppendRowsResponse>> allResponses =
+        new ArrayList<ApiFuture<AppendRowsResponse>>(totalRequest);
+    RequestProfiler.setReportPeriod(Duration.ofMillis(300));
+    // Sends 50 small, throttled requests so the profiler has traffic to report on.
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema())
+            .setEnableLatencyProfiler(true)
+            .build()) {
+      for (int k = 0; k < totalRequest; k++) {
+        JSONObject row = new JSONObject();
+        row.put("col1", "aaaaa");
+        JSONArray jsonArr = new JSONArray();
+        // 1,200-row batch.
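+        // Note: the 50 ms sleep per request below spreads the 50 appends over roughly 2.5 s,
+        // so the 300 ms report period configured above should fire several times while appends
+        // are still in flight.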
+        for (int j = 0; j < rowBatch; j++) {
+          jsonArr.put(row);
+        }
+        LOG.info("Appending: " + k + "/" + totalRequest);
+        Sleeper.DEFAULT.sleep(50);
+        allResponses.add(jsonStreamWriter.append(jsonArr, k * rowBatch));
+      }
+    }
+    LOG.info("Waiting for all responses to come back");
+    for (int i = 0; i < totalRequest; i++) {
+      try {
+        assertEquals(
+            allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatch);
+      } catch (ExecutionException ex) {
+        fail("Unexpected error " + ex);
+      }
+    }
+    RequestProfiler.disableAndResetProfiler();
+  }
+
+  @Test
+  void testJsonStreamWriterWithDefaultSchema()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException {
+    String tableName = "JsonTableDefaultSchema";
+    TableInfo tableInfo =
+        TableInfo.newBuilder(
+                TableId.of(DATASET, tableName),
+                StandardTableDefinition.of(
+                    Schema.of(
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_str", StandardSQLTypeName.STRING)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_numerics", StandardSQLTypeName.NUMERIC)
+                            .setMode(Field.Mode.REPEATED)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_datetime", StandardSQLTypeName.DATETIME)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_bytestring_repeated", StandardSQLTypeName.BYTES)
+                            .setMode(Field.Mode.REPEATED)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_timestamp", StandardSQLTypeName.TIMESTAMP)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_json", StandardSQLTypeName.JSON)
+                            .build())))
+            .build();
+
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+
+    // Create JsonStreamWriter with newBuilder(streamOrTable, client)
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(parent.toString(), writeClient)
+            .setIgnoreUnknownFields(true)
+            .build()) {
+      LOG.info("Sending one message");
+      JSONObject row1 = new JSONObject();
+      row1.put("test_str", "aaa");
+      row1.put(
+          "test_numerics",
+          new JSONArray(
+              new byte[][] {
+                BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("123.4"))
+                    .toByteArray(),
+                BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("-9000000"))
+                    .toByteArray()
+              }));
+      row1.put("unknown_field", "a");
+      row1.put(
+          "test_datetime",
+          CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime(
+              LocalDateTime.of(2020, 10, 1, 12, 0)));
+      row1.put(
+          "test_bytestring_repeated",
+          new JSONArray(
+              new byte[][] {
+                ByteString.copyFromUtf8("a").toByteArray(),
+                ByteString.copyFromUtf8("b").toByteArray()
+              }));
+      row1.put("test_timestamp", "2022-02-06 07:24:47.84");
+      row1.put("test_json", "{}");
+      JSONArray jsonArr1 = new JSONArray(new JSONObject[] {row1});
+
+      ApiFuture<AppendRowsResponse> response1 = jsonStreamWriter.append(jsonArr1, -1);
+
+      assertEquals(0, response1.get().getAppendResult().getOffset().getValue());
+
+      // row1 has already been serialized by the append above, so it is safe to reuse it here
+      // with a new value.
+      row1.put("test_str", "bbb");
+      JSONObject row2 = new JSONObject();
+      row2.put("test_str", "ccc");
+      JSONArray jsonArr2 = new JSONArray();
+      jsonArr2.put(row1);
+      jsonArr2.put(row2);
+
+      JSONObject row4 = new JSONObject();
+      row4.put("test_str", "ddd");
+      JSONArray jsonArr3 = new JSONArray();
+      jsonArr3.put(row4);
+
+      JSONObject row5 = new JSONObject();
+      // Add another ARRAY using a more idiomatic way
+      JSONArray testArr = new JSONArray(); // create empty JSONArray
+      testArr.put(0, ByteString.copyFromUtf8("a").toByteArray()); // insert 1st bytes array
+      testArr.put(1, ByteString.copyFromUtf8("b").toByteArray()); // insert 2nd bytes array
+      row5.put("test_bytestring_repeated", testArr);
+      JSONArray jsonArr4 = new JSONArray();
+      jsonArr4.put(row5);
+
+      LOG.info("Sending three more messages");
+      ApiFuture<AppendRowsResponse> response2 = jsonStreamWriter.append(jsonArr2, -1);
+      LOG.info("Sending two more messages");
+      ApiFuture<AppendRowsResponse> response3 = jsonStreamWriter.append(jsonArr3, -1);
+      LOG.info("Sending one more message");
+      ApiFuture<AppendRowsResponse> response4 = jsonStreamWriter.append(jsonArr4, -1);
+      assertFalse(response2.get().getAppendResult().hasOffset());
+      assertFalse(response3.get().getAppendResult().hasOffset());
+      assertFalse(response4.get().getAppendResult().hasOffset());
+
+      TableResult result =
+          bigquery.listTableData(
+              tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+      Iterator<FieldValueList> iter = result.getValues().iterator();
+      FieldValueList currentRow = iter.next();
+      assertEquals("aaa", currentRow.get(0).getStringValue());
+      assertEquals("-9000000", currentRow.get(1).getRepeatedValue().get(1).getStringValue());
+      assertEquals("2020-10-01T12:00:00", currentRow.get(2).getStringValue());
+      assertEquals(2, currentRow.get(3).getRepeatedValue().size());
+      assertEquals("Yg==", currentRow.get(3).getRepeatedValue().get(1).getStringValue());
+      assertEquals("bbb", iter.next().get(0).getStringValue());
+      assertEquals("ccc", iter.next().get(0).getStringValue());
+      assertEquals("ddd", iter.next().get(0).getStringValue());
+      FieldValueList currentRow2 = iter.next();
+      assertEquals("YQ==", currentRow2.get(3).getRepeatedValue().get(0).getStringValue());
+      assertEquals("Yg==", currentRow2.get(3).getRepeatedValue().get(1).getStringValue());
+      assertFalse(iter.hasNext());
+    }
+  }
+
+  @Test
+  void testJsonStreamWriterWithDefaultSchemaNoTable() {
+    String tableName = "JsonStreamWriterWithDefaultSchemaNoTable";
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+
+    // Create JsonStreamWriter with newBuilder(streamOrTable, client)
+    try (JsonStreamWriter ignore =
+        JsonStreamWriter.newBuilder(parent.toString(), writeClient)
+            .setIgnoreUnknownFields(true)
+            .build()) {
+      // Do nothing
+    } catch (Exception exception) {
+      assertTrue(exception.getMessage().contains("it may not exist"));
+    }
+  }
+
+  @Test
+  void testJsonStreamWriterWithDefaultStream()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException {
+    String tableName = "JsonTableDefaultStream";
+    TableFieldSchema TEST_STRING =
+        TableFieldSchema.newBuilder()
+            .setType(TableFieldSchema.Type.STRING)
+            .setMode(TableFieldSchema.Mode.NULLABLE)
+            .setName("test_str")
+            .build();
+    TableFieldSchema TEST_NUMERIC =
+        TableFieldSchema.newBuilder()
+            .setType(TableFieldSchema.Type.NUMERIC)
+            .setMode(TableFieldSchema.Mode.REPEATED)
+            .setName("test_numerics")
+            .build();
+    TableFieldSchema TEST_DATE =
+        TableFieldSchema.newBuilder()
+            .setType(TableFieldSchema.Type.DATETIME)
+            .setMode(TableFieldSchema.Mode.NULLABLE)
+            .setName("test_datetime")
+            .build();
+    TableFieldSchema TEST_REPEATED_BYTESTRING =
+        TableFieldSchema.newBuilder()
+            .setType(TableFieldSchema.Type.BYTES)
+            .setMode(TableFieldSchema.Mode.REPEATED)
+            .setName("test_bytestring_repeated")
+            .build();
+    TableFieldSchema TEST_TIMESTAMP =
+        TableFieldSchema.newBuilder()
+            .setName("test_timeStamp")
+            .setType(TableFieldSchema.Type.TIMESTAMP)
+            .setMode(TableFieldSchema.Mode.NULLABLE)
+            .build();
+    TableSchema tableSchema =
+        TableSchema.newBuilder()
+            .addFields(0, TEST_STRING)
+            .addFields(1, TEST_DATE)
+            .addFields(2, TEST_NUMERIC)
+            .addFields(3, TEST_REPEATED_BYTESTRING)
+            .addFields(4, TEST_TIMESTAMP)
+            .build();
+    TableInfo tableInfo =
+        TableInfo.newBuilder(
+                TableId.of(DATASET, tableName),
+                StandardTableDefinition.of(
+                    Schema.of(
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_str", StandardSQLTypeName.STRING)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_numerics", StandardSQLTypeName.NUMERIC)
+                            .setMode(Field.Mode.REPEATED)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_datetime", StandardSQLTypeName.DATETIME)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_bytestring_repeated", StandardSQLTypeName.BYTES)
+                            .setMode(Field.Mode.REPEATED)
+                            .build(),
+                        com.google.cloud.bigquery.Field.newBuilder(
+                                "test_timestamp", StandardSQLTypeName.TIMESTAMP)
+                            .build())))
+            .build();
+
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(parent.toString(), tableSchema)
+            .setIgnoreUnknownFields(true)
+            .build()) {
+      LOG.info("Sending one message");
+      JSONObject row1 = new JSONObject();
+      row1.put("test_str", "aaa");
+      row1.put(
+          "test_numerics",
+          new JSONArray(
+              new byte[][] {
+                BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("123.4"))
+                    .toByteArray(),
+                BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("-9000000"))
+                    .toByteArray()
+              }));
+      row1.put("unknown_field", "a");
+      row1.put(
+          "test_datetime",
+          CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime(
+              LocalDateTime.of(2020, 10, 1, 12, 0)));
+      row1.put(
+          "test_bytestring_repeated",
+          new JSONArray(
+              new byte[][] {
+                ByteString.copyFromUtf8("a").toByteArray(),
+                ByteString.copyFromUtf8("b").toByteArray()
+              }));
+      row1.put("test_timestamp", "2022-02-06 07:24:47.84");
+      JSONArray jsonArr1 = new JSONArray(new JSONObject[] {row1});
+
+      ApiFuture<AppendRowsResponse> response1 = jsonStreamWriter.append(jsonArr1, -1);
+
+      assertEquals(0, response1.get().getAppendResult().getOffset().getValue());
+
+      // row1 has already been serialized by the append above, so it is safe to reuse it here
+      // with a new value.
+      row1.put("test_str", "bbb");
+      JSONObject row2 = new JSONObject();
+      row2.put("test_str", "ccc");
+      JSONArray jsonArr2 = new JSONArray();
+      jsonArr2.put(row1);
+      jsonArr2.put(row2);
+
+      JSONObject row4 = new JSONObject();
+      row4.put("test_str", "ddd");
+      JSONArray jsonArr3 = new JSONArray();
+      jsonArr3.put(row4);
+
+      JSONObject row5 = new JSONObject();
+      // Add another ARRAY using a more idiomatic way
+      JSONArray testArr = new JSONArray(); // create empty JSONArray
+      testArr.put(0, ByteString.copyFromUtf8("a").toByteArray()); // insert 1st bytes array
+      testArr.put(1, ByteString.copyFromUtf8("b").toByteArray()); // insert 2nd bytes array
+      row5.put("test_bytestring_repeated", testArr);
+      JSONArray jsonArr4 = new JSONArray();
+      jsonArr4.put(row5);
+
+      LOG.info("Sending three more messages");
+      ApiFuture<AppendRowsResponse> response2 = jsonStreamWriter.append(jsonArr2, -1);
+      LOG.info("Sending two more messages");
+      ApiFuture<AppendRowsResponse> response3 = jsonStreamWriter.append(jsonArr3, -1);
+      LOG.info("Sending one more message");
+      ApiFuture<AppendRowsResponse> response4 = jsonStreamWriter.append(jsonArr4, -1);
+      assertFalse(response2.get().getAppendResult().hasOffset());
+      assertFalse(response3.get().getAppendResult().hasOffset());
+      assertFalse(response4.get().getAppendResult().hasOffset());
+
+      TableResult result =
+          bigquery.listTableData(
+              tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+      Iterator<FieldValueList> iter = result.getValues().iterator();
+      FieldValueList currentRow = iter.next();
+      assertEquals("aaa", currentRow.get(0).getStringValue());
+      assertEquals("-9000000", currentRow.get(1).getRepeatedValue().get(1).getStringValue());
+      assertEquals("2020-10-01T12:00:00", currentRow.get(2).getStringValue());
+      assertEquals(2, currentRow.get(3).getRepeatedValue().size());
+      assertEquals("Yg==", currentRow.get(3).getRepeatedValue().get(1).getStringValue());
+      assertEquals(
+          Timestamp.valueOf("2022-02-06 07:24:47.84")
+                  .toLocalDateTime()
+                  .atZone(ZoneId.of("UTC"))
+                  .toInstant()
+                  .toEpochMilli()
+              * 1000,
+          currentRow.get(4).getTimestampValue()); // timestamp long of "2022-02-06 07:24:47.84"
+      assertEquals("bbb", iter.next().get(0).getStringValue());
+      assertEquals("ccc", iter.next().get(0).getStringValue());
+      assertEquals("ddd", iter.next().get(0).getStringValue());
+      FieldValueList currentRow2 = iter.next();
+      assertEquals("YQ==", currentRow2.get(3).getRepeatedValue().get(0).getStringValue());
+      assertEquals("Yg==", currentRow2.get(3).getRepeatedValue().get(1).getStringValue());
+      assertFalse(iter.hasNext());
+    }
+  }
+
+  @Test
+  void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException,
+          ParseException {
+    String tableName = "defaultStreamDefaultValue";
+    String defaultTableId =
+        String.format(
+            "projects/%s/datasets/%s/tables/%s",
+            ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    tableInfo =
+        TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build();
+    bigquery.create(tableInfo);
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(defaultTableId, writeClient)
+            .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE)
+            .build()) {
+      testJsonStreamWriterForDefaultValue(jsonStreamWriter);
+    }
+  }
+
+  @Test
+  void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException,
+          ParseException {
+    String tableName = "exclusiveStreamDefaultValue";
+    String exclusiveTableId =
+        String.format(
+            "projects/%s/datasets/%s/tables/%s",
+            ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    tableInfo =
+        TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build();
+    bigquery.create(tableInfo);
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(exclusiveTableId)
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(exclusiveTableId, writeStream.getTableSchema())
+            .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE)
+            .build()) {
+      testJsonStreamWriterForDefaultValue(jsonStreamWriter);
+    }
+  }
+
+  private void testJsonStreamWriterForDefaultValue(JsonStreamWriter jsonStreamWriter)
+      throws DescriptorValidationException,
+          IOException,
+          ExecutionException,
+          InterruptedException,
+          ParseException {
+    // 1. row has both fields set.
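+    //    (all three columns are set explicitly here, so no default expressions should apply)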
+    JSONArray jsonArr1 = new JSONArray();
+    JSONObject row1 = new JSONObject();
+    row1.put("foo_with_default", "aaa");
+    row1.put("bar_without_default", "a");
+    row1.put("date_with_default_to_current", "2022-02-02 01:02:03");
+    jsonArr1.put(row1);
+    // 2. row with the column with default value unset
+    JSONObject row2 = new JSONObject();
+    row2.put("bar_without_default", "a");
+    jsonArr1.put(row2);
+    // 3. both values not set
+    JSONObject row3 = new JSONObject();
+    jsonArr1.put(row3);
+
+    // Start insertion and validation.
+    ApiFuture<AppendRowsResponse> response1 = jsonStreamWriter.append(jsonArr1, -1);
+    response1.get();
+    TableResult result =
+        bigquery.listTableData(tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+    Iterator<FieldValueList> iter = result.getValues().iterator();
+
+    FieldValueList currentRow = iter.next();
+    assertEquals("aaa", currentRow.get(0).getStringValue());
+    assertEquals("a", currentRow.get(1).getStringValue());
+    assertEquals(
+        Timestamp.valueOf("2022-02-02 01:02:03")
+            .toLocalDateTime()
+            .atZone(ZoneId.of("UTC"))
+            .toInstant()
+            .toEpochMilli(),
+        Double.valueOf(currentRow.get(2).getStringValue()).longValue() * 1000);
+
+    currentRow = iter.next();
+    assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
+    assertFalse(currentRow.get(2).getStringValue().isEmpty());
+    assertEquals("a", currentRow.get(1).getStringValue());
+    // Check whether the recorded value is up to date enough.
+    Instant parsedInstant =
+        Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue());
+    assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS)));
+
+    currentRow = iter.next();
+    assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
+    assertNull(currentRow.get(1).getValue());
+    assertFalse(currentRow.get(2).getStringValue().isEmpty());
+    // Check whether the recorded value is up to date enough.
+    parsedInstant =
+        Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue());
+    assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS)));
+
+    assertFalse(iter.hasNext());
+  }
+
+  @Test
+  void testStreamWriterWithDefaultValue() throws ExecutionException, InterruptedException {
+    String tableName = "streamWriterWithDefaultValue";
+    String exclusiveTableId =
+        String.format(
+            "projects/%s/datasets/%s/tables/%s",
+            ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    tableInfo =
+        TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build();
+    bigquery.create(tableInfo);
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(exclusiveTableId + "/_default")
+            .setWriterSchema(
+                ProtoSchemaConverter.convert(SimpleTypeForDefaultValue.getDescriptor()))
+            .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE)
+            .setEnableConnectionPool(true)
+            .setTraceId(TEST_TRACE_ID)
+            .build()) {
+      // 1. row has both fields set.
+      SimpleTypeForDefaultValue simpleTypeForDefaultValue1 =
+          SimpleTypeForDefaultValue.newBuilder()
+              .setFooWithDefault("foo_value")
+              .setBarWithoutDefault("bar_value")
+              .setDateWithDefaultToCurrent("2022-02-02 01:02:03")
+              .build();
+
+      // 2. row without any column set; expect default values to be filled
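+      // (setDefaultMissingValueInterpretation(DEFAULT_VALUE) above asks the backend to fill
+      // foo_with_default and date_with_default_to_current from their default expressions)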
+      SimpleTypeForDefaultValue simpleTypeForDefaultValue2 =
+          SimpleTypeForDefaultValue.newBuilder().build();
+      ProtoRows rows =
+          ProtoRows.newBuilder()
+              .addSerializedRows(simpleTypeForDefaultValue1.toByteString())
+              .addSerializedRows(simpleTypeForDefaultValue2.toByteString())
+              .build();
+
+      // Start insertion and validation.
+      ApiFuture<AppendRowsResponse> response1 = streamWriter.append(rows);
+      response1.get();
+      TableResult result =
+          bigquery.listTableData(
+              tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+      Iterator<FieldValueList> iter = result.getValues().iterator();
+
+      FieldValueList currentRow = iter.next();
+      assertEquals("foo_value", currentRow.get(0).getStringValue());
+      assertEquals("bar_value", currentRow.get(1).getStringValue());
+      assertEquals(
+          Timestamp.valueOf("2022-02-02 01:02:03")
+              .toLocalDateTime()
+              .atZone(ZoneId.of("UTC"))
+              .toInstant()
+              .toEpochMilli(),
+          Double.valueOf(currentRow.get(2).getStringValue()).longValue() * 1000);
+
+      currentRow = iter.next();
+      assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
+      assertNull(currentRow.get(1).getValue());
+      assertFalse(currentRow.get(2).getStringValue().isEmpty());
+      // Check whether the recorded value is up to date enough.
+      Instant parsedInstant =
+          Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue());
+      assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS)));
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Test
+  void testArrowIngestionWithSerializedInput()
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    testArrowIngestion(/* serializedInput= */ true);
+  }
+
+  @Test
+  void testArrowIngestionWithUnSerializedInput()
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    testArrowIngestion(/* serializedInput= */ false);
+  }
+
+  private void testArrowIngestion(boolean serializedInput)
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    String tableName =
"arrowIngestionWithSerializedInput" + : "arrowIngestionWithUnSerializedInput"; + TableInfo tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, tableName), + StandardTableDefinition.of( + Schema.of( + com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE) + .build(), + com.google.cloud.bigquery.Field.newBuilder("bar", LegacySQLTypeName.INTEGER) + .setMode(Field.Mode.NULLABLE) + .build(), + com.google.cloud.bigquery.Field.newBuilder("baz", LegacySQLTypeName.BOOLEAN) + .setMode(Field.Mode.NULLABLE) + .build()))) + .build(); + bigquery.create(tableInfo); + String tableId = + String.format( + "projects/%s/datasets/%s/tables/%s", + ServiceOptions.getDefaultProjectId(), DATASET, tableName); + + // Define Arrow schema + List fields = + ImmutableList.of( + new org.apache.arrow.vector.types.pojo.Field( + "foo", FieldType.nullable(new ArrowType.Utf8()), null), + new org.apache.arrow.vector.types.pojo.Field( + "bar", FieldType.nullable(new ArrowType.Int(64, true)), null), + new org.apache.arrow.vector.types.pojo.Field( + "baz", FieldType.nullable(new ArrowType.Bool()), null)); + org.apache.arrow.vector.types.pojo.Schema arrowSchema = + new org.apache.arrow.vector.types.pojo.Schema(fields, null); + ArrowSchema v1ArrowSchema; + ArrowRecordBatch v1ArrowRecordBatch; + org.apache.arrow.vector.ipc.message.ArrowRecordBatch recordBatch; + + try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) { + // Create Arrow data + VarCharVector foo = (VarCharVector) root.getVector("foo"); + foo.allocateNew(3); + foo.set(0, "A".getBytes()); + foo.set(1, "B".getBytes()); + foo.set(2, "C".getBytes()); + BigIntVector bar = (BigIntVector) root.getVector("bar"); + bar.allocateNew(3); + bar.set(0, 1); + bar.set(1, 2); + bar.set(2, 3); + BitVector baz = (BitVector) root.getVector("baz"); + baz.allocateNew(3); + baz.set(0, 1); + baz.set(1, 0); + baz.set(2, 1); + root.setRowCount(3); + + // Create IPC payload + ByteArrayOutputStream out = new ByteArrayOutputStream(); + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), arrowSchema); + v1ArrowSchema = + ArrowSchema.newBuilder() + .setSerializedSchema(ByteString.copyFrom(out.toByteArray())) + .build(); + + CompressionCodec codec = + NoCompressionCodec.Factory.INSTANCE.createCodec(CompressionUtil.CodecType.NO_COMPRESSION); + VectorUnloader vectorUnloader = + new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true); + recordBatch = vectorUnloader.getRecordBatch(); + + out = new ByteArrayOutputStream(); + MessageSerializer.serialize(new WriteChannel(Channels.newChannel(out)), recordBatch); + v1ArrowRecordBatch = + ArrowRecordBatch.newBuilder() + .setSerializedRecordBatch(ByteString.copyFrom(out.toByteArray())) + .build(); + } + if (serializedInput) { + try (StreamWriter streamWriter = + StreamWriter.newBuilder(tableId + "/_default", writeClient) + .setWriterSchema(v1ArrowSchema) + .setTraceId(TEST_TRACE_ID) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + .setRetrySettings( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(java.time.Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.3) + .setMaxAttempts(3) + .setMaxRetryDelayDuration(java.time.Duration.ofMinutes(5)) + .build()) + .build()) { + ApiFuture response = streamWriter.append(v1ArrowRecordBatch); + assertEquals(0, response.get().getAppendResult().getOffset().getValue()); + } + } else { + try (StreamWriter streamWriter = + StreamWriter.newBuilder(tableId + "/_default", 
+          StreamWriter.newBuilder(tableId + "/_default", writeClient)
+              .setWriterSchema(arrowSchema)
+              .setTraceId(TEST_TRACE_ID)
+              .setMaxRetryDuration(java.time.Duration.ofSeconds(5))
+              .setRetrySettings(
+                  RetrySettings.newBuilder()
+                      .setInitialRetryDelayDuration(java.time.Duration.ofMillis(500))
+                      .setRetryDelayMultiplier(1.3)
+                      .setMaxAttempts(3)
+                      .setMaxRetryDelayDuration(java.time.Duration.ofMinutes(5))
+                      .build())
+              .build()) {
+        ApiFuture<AppendRowsResponse> response = streamWriter.append(recordBatch);
+        assertEquals(0, response.get().getAppendResult().getOffset().getValue());
+      }
+    }
+
+    TableResult result =
+        bigquery.listTableData(tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
+    Iterator<FieldValueList> iter = result.getValues().iterator();
+    FieldValueList currentRow = iter.next();
+    assertEquals("A", currentRow.get(0).getStringValue());
+    assertEquals("1", currentRow.get(1).getStringValue());
+    assertTrue(currentRow.get(2).getBooleanValue());
+    currentRow = iter.next();
+    assertEquals("B", currentRow.get(0).getStringValue());
+    assertEquals("2", currentRow.get(1).getStringValue());
+    assertFalse(currentRow.get(2).getBooleanValue());
+    currentRow = iter.next();
+    assertEquals("C", currentRow.get(0).getStringValue());
+    assertEquals("3", currentRow.get(1).getStringValue());
+    assertTrue(currentRow.get(2).getBooleanValue());
+    assertFalse(iter.hasNext());
+  }
+
+  // This test runs about 1 min.
+  @Test
+  void testJsonStreamWriterWithMessagesOver10M()
+      throws IOException,
+          InterruptedException,
+          ExecutionException,
+          Descriptors.DescriptorValidationException {
+    String tableName = "TableLarge";
+    TableId tableId = TableId.of(DATASET, tableName);
+    Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
+    Schema schema = Schema.of(col1);
+    TableInfo tableInfo = TableInfo.newBuilder(tableId, StandardTableDefinition.of(schema)).build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    int totalRequest = 10;
+    int rowBatch = 40000;
+    ArrayList<ApiFuture<AppendRowsResponse>> allResponses =
+        new ArrayList<ApiFuture<AppendRowsResponse>>(totalRequest);
+    // Sends a total of 30MB over the wire.
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()).build()) {
+      for (int k = 0; k < totalRequest; k++) {
+        JSONObject row = new JSONObject();
+        row.put("col1", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+        JSONArray jsonArr = new JSONArray();
+        // 3MB batch.
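+        // (~40,000 rows of a ~70-character string is roughly 3 MB of row data per request, and
+        // 10 such requests give the ~30 MB total mentioned above)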
+        for (int j = 0; j < rowBatch; j++) {
+          jsonArr.put(row);
+        }
+        LOG.info("Appending: " + k + "/" + totalRequest);
+        allResponses.add(jsonStreamWriter.append(jsonArr, k * rowBatch));
+      }
+    }
+    LOG.info("Waiting for all responses to come back");
+    for (int i = 0; i < totalRequest; i++) {
+      try {
+        assertEquals(
+            allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatch);
+      } catch (ExecutionException ex) {
+        fail("Unexpected error " + ex);
+      }
+    }
+  }
+
+  @Test
+  void testJsonStreamWriterSchemaUpdate()
+      throws DescriptorValidationException, IOException, InterruptedException, ExecutionException {
+    String tableName = "SchemaUpdateTestTable";
+    TableId tableId = TableId.of(DATASET, tableName);
+    Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
+    Schema originalSchema = Schema.of(col1);
+    TableInfo tableInfo =
+        TableInfo.newBuilder(tableId, StandardTableDefinition.of(originalSchema)).build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) {
+      // write the 1st row
+      JSONObject foo = new JSONObject();
+      foo.put("col1", "aaa");
+      JSONArray jsonArr = new JSONArray();
+      jsonArr.put(foo);
+      ApiFuture<AppendRowsResponse> response = jsonStreamWriter.append(jsonArr, 0);
+      assertEquals(0, response.get().getAppendResult().getOffset().getValue());
+
+      // update schema with a new column
+      Field col2 = Field.newBuilder("col2", StandardSQLTypeName.STRING).build();
+      Schema updatedSchema = Schema.of(ImmutableList.of(col1, col2));
+      TableInfo updatedTableInfo =
+          TableInfo.newBuilder(tableId, StandardTableDefinition.of(updatedSchema)).build();
+      Table updatedTable = bigquery.update(updatedTableInfo);
+      assertEquals(updatedSchema, updatedTable.getDefinition().getSchema());
+
+      // continue writing rows until backend acknowledges schema update
+      JSONObject foo2 = new JSONObject();
+      foo2.put("col1", "bbb");
+      JSONArray jsonArr2 = new JSONArray();
+      jsonArr2.put(foo2);
+
+      int next = 0;
+      for (int i = 1; i < 100; i++) {
+        ApiFuture<AppendRowsResponse> response2 = jsonStreamWriter.append(jsonArr2, i);
+        assertEquals(i, response2.get().getAppendResult().getOffset().getValue());
+        if (response2.get().hasUpdatedSchema()) {
+          next = i;
+          break;
+        } else {
+          Thread.sleep(1000);
+        }
+      }
+
+      // write rows with updated schema.
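+      // Offsets resume at next + 1, where `next` is the offset whose append response first
+      // reported hasUpdatedSchema().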
+      JSONObject updatedFoo = new JSONObject();
+      updatedFoo.put("col1", "ccc");
+      updatedFoo.put("col2", "ddd");
+      JSONArray updatedJsonArr = new JSONArray();
+      updatedJsonArr.put(updatedFoo);
+      for (int i = 0; i < 10; i++) {
+        ApiFuture<AppendRowsResponse> response3 =
+            jsonStreamWriter.append(updatedJsonArr, next + 1 + i);
+        assertEquals(next + 1 + i, response3.get().getAppendResult().getOffset().getValue());
+      }
+
+      // verify table data correctness
+      Iterator<FieldValueList> rowsIter = bigquery.listTableData(tableId).getValues().iterator();
+      // 1 row of aaa
+      assertEquals("aaa", rowsIter.next().get(0).getStringValue());
+      // a few rows of bbb
+      for (int j = 1; j <= next; j++) {
+        assertEquals("bbb", rowsIter.next().get(0).getStringValue());
+      }
+      // 10 rows of ccc, ddd
+      for (int j = next + 1; j < next + 1 + 10; j++) {
+        FieldValueList temp = rowsIter.next();
+        assertEquals("ccc", temp.get(0).getStringValue());
+        assertEquals("ddd", temp.get(1).getStringValue());
+      }
+      assertFalse(rowsIter.hasNext());
+    }
+  }
+
+  @Test
+  void testJsonStreamWriterSchemaUpdateConcurrent()
+      throws DescriptorValidationException, IOException, InterruptedException {
+    // Create test table and test stream
+    String tableName = "ConcurrentSchemaUpdateTestTable";
+    TableId tableId = TableId.of(DATASET, tableName);
+    Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
+    Schema originalSchema = Schema.of(col1);
+    TableInfo tableInfo =
+        TableInfo.newBuilder(tableId, StandardTableDefinition.of(originalSchema)).build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+
+    // Create test JSON objects
+    JSONObject foo = new JSONObject();
+    foo.put("col1", "aaa");
+    JSONArray jsonArr = new JSONArray();
+    jsonArr.put(foo);
+
+    JSONObject foo2 = new JSONObject();
+    foo2.put("col1", "bbb");
+    JSONArray jsonArr2 = new JSONArray();
+    jsonArr2.put(foo2);
+
+    JSONObject updatedFoo = new JSONObject();
+    updatedFoo.put("col1", "ccc");
+    updatedFoo.put("col2", "ddd");
+    JSONArray updatedJsonArr = new JSONArray();
+    updatedJsonArr.put(updatedFoo);
+
+    // Prepare updated schema
+    Field col2 = Field.newBuilder("col2", StandardSQLTypeName.STRING).build();
+    Schema updatedSchema = Schema.of(ImmutableList.of(col1, col2));
+    TableInfo updatedTableInfo =
+        TableInfo.newBuilder(tableId, StandardTableDefinition.of(updatedSchema)).build();
+
+    // Start writing using the JsonWriter
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) {
+      int numberOfThreads = 5;
+      CountDownLatch latch;
+      AtomicInteger next;
+      ExecutorService streamTaskExecutor = Executors.newFixedThreadPool(5);
+      latch = new CountDownLatch(numberOfThreads);
+      // Used to verify data correctness
+      next = new AtomicInteger();
+
+      // update TableSchema async
+      Runnable updateTableSchemaTask =
+          () -> {
+            Table updatedTable = bigquery.update(updatedTableInfo);
+            assertEquals(updatedSchema, updatedTable.getDefinition().getSchema());
+          };
+      streamTaskExecutor.execute(updateTableSchemaTask);
+
+      // stream data async
+      for (int i = 0; i < numberOfThreads; i++) {
+        streamTaskExecutor.submit(
+            () -> {
+              // write 2 rows of aaa on each Thread
+              for (int j = 0; j < 2; j++) {
+                try {
+                  jsonStreamWriter.append(jsonArr);
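+                  // next counts every successfully submitted row; the verification below uses
+                  // it to locate the last 5 rows, which should carry the updated schema.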
+                  next.getAndIncrement();
+                } catch (IOException | DescriptorValidationException e) {
+                  e.printStackTrace();
+                }
+              }
+
+              // write filler rows bbb until backend acknowledges schema update due to possible
+              // delay
+              for (int w = 0; w < 15; w++) {
+                ApiFuture<AppendRowsResponse> response2 = null;
+                try {
+                  response2 = jsonStreamWriter.append(jsonArr2);
+                  next.getAndIncrement();
+                } catch (IOException | DescriptorValidationException e) {
+                  LOG.severe("Issue with append " + e.getMessage());
+                }
+                try {
+                  assert response2 != null;
+                  if (response2.get().hasUpdatedSchema()) {
+                    break;
+                  } else {
+                    Thread.sleep(1000);
+                  }
+                } catch (InterruptedException | ExecutionException e) {
+                  LOG.severe("Issue with append " + e.getMessage());
+                }
+              }
+
+              // write 5 rows of ccc,ddd on each Thread
+              for (int m = 0; m < 5; m++) {
+                try {
+                  jsonStreamWriter.append(updatedJsonArr);
+                  next.getAndIncrement();
+                } catch (IOException | DescriptorValidationException e) {
+                  LOG.severe("Issue with append " + e.getMessage());
+                }
+              }
+              latch.countDown();
+            });
+      }
+      latch.await();
+      streamTaskExecutor.shutdown();
+
+      // verify that the last 5 rows streamed are ccc,ddd
+      Iterator<FieldValueList> rowsIter = bigquery.listTableData(tableId).getValues().iterator();
+
+      int position = 0;
+      while (rowsIter.hasNext()) {
+        FieldValueList row = rowsIter.next();
+        position++;
+        if (position > next.get() - 5) {
+          assertEquals("ccc", row.get(0).getStringValue());
+          assertEquals("ddd", row.get(1).getStringValue());
+        }
+      }
+    }
+  }
+
+  @Test
+  void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap()
+      throws DescriptorValidationException, ExecutionException, IOException, InterruptedException {
+    String tableName = "SchemaUpdateMissingValueMapTestTable";
+    TableId tableId = TableId.of(DATASET, tableName);
+    tableInfo = TableInfo.newBuilder(tableId, defaultValueTableDefinition).build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    Map<String, AppendRowsRequest.MissingValueInterpretation> missingValueMap = new HashMap<>();
+    missingValueMap.put(
+        "foo_with_default", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE);
+    missingValueMap.put(
+        "date_with_default_to_current",
+        AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE);
+
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeClient)
+            .setMissingValueInterpretationMap(missingValueMap)
+            .build()) {
+      // Verify the missing value map
+      assertEquals(missingValueMap, jsonStreamWriter.getMissingValueInterpretationMap());
+
+      // First append with the current schema
+      JSONObject jsonObject = new JSONObject();
+      jsonObject.put("bar_without_default", "existing_col_before_update");
+      JSONArray jsonArr = new JSONArray();
+      jsonArr.put(jsonObject);
+      ApiFuture<AppendRowsResponse> response1 = jsonStreamWriter.append(jsonArr, 0);
+      assertEquals(0, response1.get().getAppendResult().getOffset().getValue());
+
+      // Add a column to the table
+      Field newCol =
+          Field.newBuilder("new_col_without_default", StandardSQLTypeName.STRING)
+              .setMode(Field.Mode.NULLABLE)
+              .build();
+      ArrayList<Field> updatedFields =
+          new ArrayList<>(defaultValueTableDefinition.getSchema().getFields());
+      updatedFields.add(newCol);
+      Schema updatedSchema = Schema.of(updatedFields);
+      TableInfo updatedTableInfo =
+          TableInfo.newBuilder(tableId, StandardTableDefinition.of(updatedSchema)).build();
+      Table updatedTable = bigquery.update(updatedTableInfo);
+      assertEquals(updatedSchema, updatedTable.getDefinition().getSchema());
+
+      // Continue writing rows until backend acknowledges schema update
+      JSONObject jsonObject2 = new JSONObject();
+      jsonObject2.put("bar_without_default", "no_schema_update_yet");
+      JSONArray jsonArr2 = new JSONArray();
+      jsonArr2.put(jsonObject2);
+
+      int nextI = 0;
+      for (int i = 1; i < 100; i++) {
+        ApiFuture<AppendRowsResponse> response2 = jsonStreamWriter.append(jsonArr2, i);
+        assertEquals(i, response2.get().getAppendResult().getOffset().getValue());
+        if (response2.get().hasUpdatedSchema()) {
+          nextI = i + 1;
+          break;
+        } else {
+          Thread.sleep(1000);
+        }
+      }
+
+      // Write using the new schema with 10 new requests
+      JSONObject updatedCol = new JSONObject();
+      updatedCol.put("bar_without_default", "existing_col");
+      updatedCol.put("new_col_without_default", "new_col");
+      JSONArray updatedJsonArr = new JSONArray();
+      updatedJsonArr.put(updatedCol);
+      for (int i = nextI; i < nextI + 10; i++) {
+        ApiFuture<AppendRowsResponse> response3 = jsonStreamWriter.append(updatedJsonArr, i);
+        assertEquals(i, response3.get().getAppendResult().getOffset().getValue());
+      }
+
+      // List all rows to verify table data correctness
+      Iterator<FieldValueList> rowsIter = bigquery.listTableData(tableId).getValues().iterator();
+
+      // Verify 1st row (with "existing_col_before_update")
+      FieldValueList currentRow = rowsIter.next();
+      assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
+      assertEquals("existing_col_before_update", currentRow.get(1).getStringValue());
+      assertFalse(currentRow.get(2).getStringValue().isEmpty());
+      // Check whether the recorded value is close enough.
+      Instant parsedInstant =
+          Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue());
+      assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS)));
+
+      // A few rows (with "no_schema_update_yet") until the schema was updated
+      for (int j = 1; j < nextI; j++) {
+        currentRow = rowsIter.next();
+        assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
+        assertEquals("no_schema_update_yet", currentRow.get(1).getStringValue());
+        // Check whether the recorded value is close enough.
+        parsedInstant =
+            Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue());
+        assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS)));
+      }
+      // 10 rows after schema update with new column included
+      for (int j = nextI; j < nextI + 10; j++) {
+        currentRow = rowsIter.next();
+        assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
+        assertEquals("existing_col", currentRow.get(1).getStringValue());
+        assertFalse(currentRow.get(2).getStringValue().isEmpty());
+        // Check whether the recorded value is close enough.
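+        // (date_with_default_to_current defaults to CURRENT_TIMESTAMP(), so the recorded value
+        // should fall within the last hour)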
+ parsedInstant = + Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue()); + assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS))); + // Verify the new column + assertEquals("new_col", currentRow.get(3).getStringValue()); + } + assertFalse(rowsIter.hasNext()); + + // Verify that the missing value map hasn't changed + assertEquals(missingValueMap, jsonStreamWriter.getMissingValueInterpretationMap()); + } + } + + @Test + void testJsonStreamWriterWithFlexibleColumnName() + throws IOException, + InterruptedException, + ExecutionException, + Descriptors.DescriptorValidationException { + String tableName = "FlexibleColumnTable"; + TableInfo tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, tableName), + StandardTableDefinition.of( + Schema.of( + com.google.cloud.bigquery.Field.newBuilder( + "test-str列", StandardSQLTypeName.STRING) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test-numerics列", StandardSQLTypeName.NUMERIC) + .setMode(Field.Mode.REPEATED) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test-datetime列", StandardSQLTypeName.DATETIME) + .build()))) + .build(); + bigquery.create(tableInfo); + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(parent.toString()) + .setWriteStream( + WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) + .build()); + try (JsonStreamWriter jsonStreamWriter = + JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()).build()) { + LOG.info("Sending one message"); + JSONObject row1 = new JSONObject(); + row1.put("test-str列", "aaa"); + row1.put( + "test-numerics列", + new JSONArray( + new byte[][] { + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("123.4")) + .toByteArray(), + BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("-9000000")) + .toByteArray() + })); + row1.put( + "test-datetime列", + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.of(2020, 10, 1, 12, 0))); + JSONArray jsonArr1 = new JSONArray(new JSONObject[] {row1}); + + ApiFuture response1 = jsonStreamWriter.append(jsonArr1, -1); + + assertEquals(0, response1.get().getAppendResult().getOffset().getValue()); + + JSONObject row2 = new JSONObject(); + row2.put("test-str列", "bbb"); + JSONObject row3 = new JSONObject(); + row3.put("test-str列", "ccc"); + JSONArray jsonArr2 = new JSONArray(); + jsonArr2.put(row2); + jsonArr2.put(row3); + + JSONObject row4 = new JSONObject(); + row4.put("test-str列", "ddd"); + JSONArray jsonArr3 = new JSONArray(); + jsonArr3.put(row4); + + LOG.info("Sending two more messages"); + ApiFuture response2 = jsonStreamWriter.append(jsonArr2, -1); + LOG.info("Sending one more message"); + ApiFuture response3 = jsonStreamWriter.append(jsonArr3, -1); + assertEquals(1, response2.get().getAppendResult().getOffset().getValue()); + assertEquals(3, response3.get().getAppendResult().getOffset().getValue()); + + TableResult result = + bigquery.listTableData( + tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); + Iterator iter = result.getValues().iterator(); + FieldValueList currentRow = iter.next(); + assertEquals("aaa", currentRow.get(0).getStringValue()); + assertEquals("-9000000", currentRow.get(1).getRepeatedValue().get(1).getStringValue()); + assertEquals("2020-10-01T12:00:00", currentRow.get(2).getStringValue()); + 
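      // The "2020-10-01T12:00:00" above round-trips through CivilTimeEncoder's packed64
+      // datetime-micros encoding. A minimal sketch of that packing, assuming an in-range
+      // LocalDateTime dt and the encoder's documented bit layout
+      // | year:14 | month:4 | day:5 | hour:5 | minute:6 | second:6 | micros:20 |:
+      //
+      //   long packed =
+      //       ((long) dt.getYear() << 46)
+      //           | ((long) dt.getMonthValue() << 42)
+      //           | ((long) dt.getDayOfMonth() << 37)
+      //           | ((long) dt.getHour() << 32)
+      //           | ((long) dt.getMinute() << 26)
+      //           | ((long) dt.getSecond() << 20)
+      //           | (dt.getNano() / 1_000L);
+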
assertEquals("bbb", iter.next().get(0).getStringValue()); + assertEquals("ccc", iter.next().get(0).getStringValue()); + assertEquals("ddd", iter.next().get(0).getStringValue()); + assertFalse(iter.hasNext()); + } + } + + @Test + void testJsonStreamWriterWithNestedFlexibleColumnName() + throws IOException, + InterruptedException, + ExecutionException, + Descriptors.DescriptorValidationException { + String tableName = "NestedFlexibleColumnTable"; + TableInfo tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, tableName), + StandardTableDefinition.of( + Schema.of( + com.google.cloud.bigquery.Field.newBuilder( + "test-str列", StandardSQLTypeName.STRING) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test-record列", + StandardSQLTypeName.STRUCT, + com.google.cloud.bigquery.Field.of( + "nested-str列", StandardSQLTypeName.STRING), + com.google.cloud.bigquery.Field.of( + "nested-int列", StandardSQLTypeName.INT64)) + .setMode(Field.Mode.REPEATED) + .build()))) + .build(); + bigquery.create(tableInfo); + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(parent.toString()) + .setWriteStream( + WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) + .build()); + try (JsonStreamWriter jsonStreamWriter = + JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()).build()) { + LOG.info("Sending one message"); + JSONObject row1 = new JSONObject(); + row1.put("test-str列", "aaa"); + JSONObject record1 = new JSONObject(); + record1.put("nested-str列", "nested-str1"); + record1.put("nested-int列", 10); + row1.put("test-record列", new JSONArray(new JSONObject[] {record1})); + JSONArray jsonArr1 = new JSONArray(new JSONObject[] {row1}); + + ApiFuture response1 = jsonStreamWriter.append(jsonArr1, -1); + + assertEquals(0, response1.get().getAppendResult().getOffset().getValue()); + + JSONObject row2 = new JSONObject(); + row2.put("test-str列", "bbb"); + JSONObject row3 = new JSONObject(); + row3.put("test-str列", "ccc"); + JSONArray jsonArr2 = new JSONArray(); + jsonArr2.put(row2); + jsonArr2.put(row3); + + JSONObject row4 = new JSONObject(); + row4.put("test-str列", "ddd"); + JSONObject record2 = new JSONObject(); + record2.put("nested-str列", "nested-str2"); + record2.put("nested-int列", 20); + row4.put("test-record列", new JSONArray(new JSONObject[] {record2})); + JSONArray jsonArr3 = new JSONArray(); + jsonArr3.put(row4); + + LOG.info("Sending two more messages"); + ApiFuture response2 = jsonStreamWriter.append(jsonArr2, -1); + LOG.info("Sending one more message"); + ApiFuture response3 = jsonStreamWriter.append(jsonArr3, -1); + assertEquals(1, response2.get().getAppendResult().getOffset().getValue()); + assertEquals(3, response3.get().getAppendResult().getOffset().getValue()); + + TableResult result = + bigquery.listTableData( + tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); + Iterator iter = result.getValues().iterator(); + FieldValueList currentRow = iter.next(); + assertEquals("aaa", currentRow.get(0).getStringValue()); + FieldValueList currentRecord = currentRow.get(1).getRepeatedValue().get(0).getRecordValue(); + assertEquals("nested-str1", currentRecord.get(0).getStringValue()); + assertEquals("10", currentRecord.get(1).getStringValue()); + assertEquals("bbb", iter.next().get(0).getStringValue()); + assertEquals("ccc", iter.next().get(0).getStringValue()); + FieldValueList lastRow = 
iter.next(); + assertEquals("ddd", lastRow.get(0).getStringValue()); + FieldValueList lastRecord = lastRow.get(1).getRepeatedValue().get(0).getRecordValue(); + assertEquals("nested-str2", lastRecord.get(0).getStringValue()); + assertEquals("20", lastRecord.get(1).getStringValue()); + assertFalse(iter.hasNext()); + } + } + + @Test + void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName() + throws DescriptorValidationException, IOException, InterruptedException, ExecutionException { + String tableName = "SchemaUpdateFlexColumnTestTable"; + TableId tableId = TableId.of(DATASET, tableName); + Field col1 = Field.newBuilder("col1-列", StandardSQLTypeName.STRING).build(); + Schema originalSchema = Schema.of(col1); + TableInfo tableInfo = + TableInfo.newBuilder(tableId, StandardTableDefinition.of(originalSchema)).build(); + bigquery.create(tableInfo); + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(parent.toString()) + .setWriteStream( + WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) + .build()); + try (JsonStreamWriter jsonStreamWriter = + JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) { + // write the 1st row + JSONObject foo = new JSONObject(); + foo.put("col1-列", "aaa"); + JSONArray jsonArr = new JSONArray(); + jsonArr.put(foo); + ApiFuture response = jsonStreamWriter.append(jsonArr, 0); + assertEquals(0, response.get().getAppendResult().getOffset().getValue()); + + // update schema with a new column + Field col2 = Field.newBuilder("col2-列", StandardSQLTypeName.STRING).build(); + Schema updatedSchema = Schema.of(ImmutableList.of(col1, col2)); + TableInfo updatedTableInfo = + TableInfo.newBuilder(tableId, StandardTableDefinition.of(updatedSchema)).build(); + Table updatedTable = bigquery.update(updatedTableInfo); + assertEquals(updatedSchema, updatedTable.getDefinition().getSchema()); + + // continue writing rows until backend acknowledges schema update + JSONObject foo2 = new JSONObject(); + foo2.put("col1-列", "bbb"); + JSONArray jsonArr2 = new JSONArray(); + jsonArr2.put(foo2); + + int next = 0; + for (int i = 1; i < 100; i++) { + ApiFuture response2 = jsonStreamWriter.append(jsonArr2, i); + assertEquals(i, response2.get().getAppendResult().getOffset().getValue()); + if (response2.get().hasUpdatedSchema()) { + next = i; + break; + } else { + Thread.sleep(1000); + } + } + + // write rows with updated schema. 
+ JSONObject updatedFoo = new JSONObject(); + updatedFoo.put("col1-列", "ccc"); + updatedFoo.put("col2-列", "ddd"); + JSONArray updatedJsonArr = new JSONArray(); + updatedJsonArr.put(updatedFoo); + for (int i = 0; i < 10; i++) { + ApiFuture response3 = + jsonStreamWriter.append(updatedJsonArr, next + 1 + i); + assertEquals(next + 1 + i, response3.get().getAppendResult().getOffset().getValue()); + } + + // verify table data correctness + Iterator rowsIter = bigquery.listTableData(tableId).getValues().iterator(); + // 1 row of aaa + assertEquals("aaa", rowsIter.next().get(0).getStringValue()); + // a few rows of bbb + for (int j = 1; j <= next; j++) { + assertEquals("bbb", rowsIter.next().get(0).getStringValue()); + } + // 10 rows of ccc, ddd + for (int j = next + 1; j < next + 1 + 10; j++) { + FieldValueList temp = rowsIter.next(); + assertEquals("ccc", temp.get(0).getStringValue()); + assertEquals("ddd", temp.get(1).getStringValue()); + } + assertFalse(rowsIter.hasNext()); + } + } + + @Test + void testComplicateSchemaWithPendingStream() + throws IOException, InterruptedException, ExecutionException { + LOG.info("Create a write stream"); + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(tableId2) + .setWriteStream(WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build()) + .build()); + FinalizeWriteStreamResponse finalizeResponse; + try (StreamWriter streamWriter = + StreamWriter.newBuilder(writeStream.getName()) + .setWriterSchema(ProtoSchemaConverter.convert(ComplicateType.getDescriptor())) + .build()) { + LOG.info("Sending two messages"); + ApiFuture response = + streamWriter.append(createProtoRowsComplex(new String[] {"aaa"}), 0L); + assertEquals(0, response.get().getAppendResult().getOffset().getValue()); + + ApiFuture response2 = + streamWriter.append(createProtoRowsComplex(new String[] {"bbb"}), 1L); + assertEquals(1, response2.get().getAppendResult().getOffset().getValue()); + + // Nothing showed up since rows are not committed. + TableResult result = + bigquery.listTableData( + tableInfo2.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); + Iterator iter = result.getValues().iterator(); + assertFalse(iter.hasNext()); + + LOG.info("Finalize a write stream"); + finalizeResponse = + writeClient.finalizeWriteStream( + FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build()); + + ApiFuture response3 = + streamWriter.append(createProtoRows(new String[] {"ccc"}), 2L); + ExecutionException expected = assertThrows(ExecutionException.class, () -> response3.get()); + LOG.info("Got exception: " + expected.toString()); + } + assertEquals(2, finalizeResponse.getRowCount()); + LOG.info("Commit a write stream"); + BatchCommitWriteStreamsResponse batchCommitWriteStreamsResponse = + writeClient.batchCommitWriteStreams( + BatchCommitWriteStreamsRequest.newBuilder() + .setParent(tableId2) + .addWriteStreams(writeStream.getName()) + .build()); + assertTrue(batchCommitWriteStreamsResponse.hasCommitTime()); + TableResult queryResult = + bigquery.query( + QueryJobConfiguration.newBuilder("SELECT * from " + DATASET + '.' 
+ TABLE2).build());
+    Iterator<FieldValueList> queryIter = queryResult.getValues().iterator();
+    assertTrue(queryIter.hasNext());
+    assertEquals(
+        "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=aaa,"
+            + " useInt64Timestamps=false}, FieldValue{attribute=PRIMITIVE, value=aaa,"
+            + " useInt64Timestamps=false}], useInt64Timestamps=false}]",
+        queryIter.next().get(1).getRepeatedValue().toString());
+    assertEquals(
+        "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=bbb,"
+            + " useInt64Timestamps=false}, FieldValue{attribute=PRIMITIVE, value=bbb,"
+            + " useInt64Timestamps=false}], useInt64Timestamps=false}]",
+        queryIter.next().get(1).getRepeatedValue().toString());
+    assertFalse(queryIter.hasNext());
+  }
+
+  @Test
+  void testStreamError() throws IOException, InterruptedException, ExecutionException {
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(tableId)
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(writeStream.getName())
+            .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
+            .build()) {
+      ApiFuture<AppendRowsResponse> response =
+          streamWriter.append(createProtoRows(new String[] {"aaa"}), -1L);
+      assertEquals(0L, response.get().getAppendResult().getOffset().getValue());
+      // Appending with an out-of-range offset should fail the request with OUT_OF_RANGE.
+      ApiFuture<AppendRowsResponse> response2 =
+          streamWriter.append(createProtoRows(new String[] {"aaa"}), 100L);
+      ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get());
+      assertThat(e.getCause().getMessage())
+          .contains("OUT_OF_RANGE: The offset is beyond stream, expected offset 1, received 100");
+      // We can keep sending requests on the same stream.
+      ApiFuture<AppendRowsResponse> response3 =
+          streamWriter.append(createProtoRows(new String[] {"aaa"}), -1L);
+      assertEquals(1L, response3.get().getAppendResult().getOffset().getValue());
+    }
+  }
+
+  @Test
+  void testStreamSchemaMisMatchError() throws IOException, InterruptedException {
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(tableId)
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(writeStream.getName())
+            .setWriterSchema(ProtoSchemaConverter.convert(UpdatedFooType.getDescriptor()))
+            .build()) {
+      // Create a proto row that has more fields than the table schema defines, which should
+      // trigger the SCHEMA_MISMATCH_EXTRA_FIELDS error.
+      ApiFuture<AppendRowsResponse> response =
+          streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
+      ExecutionException e = assertThrows(ExecutionException.class, () -> response.get());
+      assertEquals(Exceptions.SchemaMismatchedException.class, e.getCause().getClass());
+      Exceptions.SchemaMismatchedException actualError = (SchemaMismatchedException) e.getCause();
+      assertNotNull(actualError.getStreamName());
+      // This verifies that the Beam connector can consume this custom exception's grpc StatusCode
+      assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode());
+    }
+  }
+
+  @Test
+  void testStreamFinalizedError() throws IOException, InterruptedException, ExecutionException {
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(tableId)
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(writeStream.getName())
+            .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
+            .build()) {
+      // Append once before finalizing the stream
+      ApiFuture<AppendRowsResponse> response =
+          streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
+      response.get();
+      // Finalize the stream in order to trigger STREAM_FINALIZED error
+      writeClient.finalizeWriteStream(
+          FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build());
+      // Try to append to a finalized stream
+      ApiFuture<AppendRowsResponse> response2 =
+          streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 1);
+      ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get());
+      assertEquals(Exceptions.StreamFinalizedException.class, e.getCause().getClass());
+      Exceptions.StreamFinalizedException actualError = (StreamFinalizedException) e.getCause();
+      assertNotNull(actualError.getStreamName());
+      // This verifies that the Beam connector can consume this custom exception's grpc StatusCode
+      assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode());
+      assertThat(e.getCause().getMessage()).contains("Stream has been finalized");
+    }
+  }
+
+  @Test
+  void testOffsetAlreadyExistsError() throws IOException, ExecutionException, InterruptedException {
+    WriteStream writeStream =
+        writeClient.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(tableId)
+                .setWriteStream(
+                    WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
+                .build());
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(writeStream.getName())
+            .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
+
.build()) { + // Append once with correct offset + ApiFuture response = + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); + response.get(); + // Append again with the same offset + ApiFuture response2 = + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); + ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get()); + assertEquals(Exceptions.OffsetAlreadyExists.class, e.getCause().getClass()); + Exceptions.OffsetAlreadyExists actualError = (OffsetAlreadyExists) e.getCause(); + assertNotNull(actualError.getStreamName()); + assertEquals(1, actualError.getExpectedOffset()); + assertEquals(0, actualError.getActualOffset()); + assertEquals(Code.ALREADY_EXISTS, Status.fromThrowable(e.getCause()).getCode()); + assertThat(e.getCause().getMessage()) + .contains("The offset is within stream, expected offset 1, received 0"); + } + } + + @Test + void testOffsetOutOfRangeError() throws IOException, InterruptedException { + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(tableId) + .setWriteStream( + WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) + .build()); + try (StreamWriter streamWriter = + StreamWriter.newBuilder(writeStream.getName()) + .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) + .build()) { + // Append with an out of range offset + ApiFuture response = + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 10); + ExecutionException e = assertThrows(ExecutionException.class, () -> response.get()); + assertEquals(Exceptions.OffsetOutOfRange.class, e.getCause().getClass()); + Exceptions.OffsetOutOfRange actualError = (OffsetOutOfRange) e.getCause(); + assertNotNull(actualError.getStreamName()); + assertEquals(0, actualError.getExpectedOffset()); + assertEquals(10, actualError.getActualOffset()); + assertEquals(Code.OUT_OF_RANGE, Status.fromThrowable(e.getCause()).getCode()); + assertThat(e.getCause().getMessage()) + .contains("The offset is beyond stream, expected offset 0, received 10"); + } + } + + @Test + void testStreamReconnect() throws IOException, InterruptedException, ExecutionException { + WriteStream writeStream = + writeClient.createWriteStream( + CreateWriteStreamRequest.newBuilder() + .setParent(tableId) + .setWriteStream( + WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) + .build()); + try (StreamWriter streamWriter = + StreamWriter.newBuilder(writeStream.getName()) + .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) + .build()) { + ApiFuture response = + streamWriter.append(createProtoRows(new String[] {"aaa"}), 0L); + assertEquals(0L, response.get().getAppendResult().getOffset().getValue()); + } + + try (StreamWriter streamWriter = + StreamWriter.newBuilder(writeStream.getName()) + .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) + .build()) { + // Currently there is a bug that reconnection must wait 5 seconds to get the real row count. 
+ Thread.sleep(5000L); + ApiFuture response = + streamWriter.append(createProtoRows(new String[] {"bbb"}), 1L); + assertEquals(1L, response.get().getAppendResult().getOffset().getValue()); + } + } + + @Test + void testMultiplexingMixedLocation() + throws IOException, InterruptedException, ExecutionException { + ConnectionWorkerPool.setOptions( + ConnectionWorkerPool.Settings.builder() + .setMinConnectionsPerRegion(1) + .setMaxConnectionsPerRegion(2) + .build()); + String defaultStream1 = + String.format( + "projects/%s/datasets/%s/tables/%s/streams/_default", + ServiceOptions.getDefaultProjectId(), DATASET, TABLE); + String defaultStream2 = + String.format( + "projects/%s/datasets/%s/tables/%s/streams/_default", + ServiceOptions.getDefaultProjectId(), DATASET, TABLE2); + String defaultStream3 = + String.format( + "projects/%s/datasets/%s/tables/%s/streams/_default", + ServiceOptions.getDefaultProjectId(), DATASET_EU, TABLE); + + StreamWriter streamWriter1 = + StreamWriter.newBuilder(defaultStream1) + .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) + .setEnableConnectionPool(true) + .setTraceId(TEST_TRACE_ID) + .build(); + StreamWriter streamWriter2 = + StreamWriter.newBuilder(defaultStream2) + .setWriterSchema(ProtoSchemaConverter.convert(ComplicateType.getDescriptor())) + .setEnableConnectionPool(true) + .setTraceId(TEST_TRACE_ID) + .build(); + StreamWriter streamWriter3 = + StreamWriter.newBuilder(defaultStream3) + .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) + .setEnableConnectionPool(true) + .setTraceId(TEST_TRACE_ID) + .build(); + ApiFuture response1 = + streamWriter1.append(createProtoRows(new String[] {"aaa"})); + ApiFuture response2 = + streamWriter2.append(createProtoRowsComplex(new String[] {"aaa"})); + ApiFuture response3 = + streamWriter3.append(createProtoRows(new String[] {"bbb"})); + assertEquals(0L, response1.get().getAppendResult().getOffset().getValue()); + assertEquals(0L, response2.get().getAppendResult().getOffset().getValue()); + assertEquals(0L, response3.get().getAppendResult().getOffset().getValue()); + assertEquals("us", streamWriter1.getLocation()); + assertEquals("us", streamWriter2.getLocation()); + assertEquals("eu", streamWriter3.getLocation()); + streamWriter1.close(); + streamWriter2.close(); + streamWriter3.close(); + } + + @Test + void testLargeRequest() throws IOException, InterruptedException, ExecutionException { + String tableName = "largeRequestTable"; + TableId tableId = TableId.of(DATASET, tableName); + Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build(); + Schema originalSchema = Schema.of(col1); + TableInfo tableInfo = + TableInfo.newBuilder(tableId, StandardTableDefinition.of(originalSchema)).build(); + bigquery.create(tableInfo); + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + try (StreamWriter streamWriter = + StreamWriter.newBuilder(parent.toString() + "/_default") + .setWriterSchema(createProtoSchemaWithColField()) + .build()) { + List sizeSet = Arrays.asList(15 * 1024 * 1024, 1024); + List> responseList = + new ArrayList>(); + Random r = new Random(); + for (int i = 0; i < 50; i++) { + int size = sizeSet.get(r.nextInt(2)); + LOG.info("Sending size: " + size); + responseList.add( + streamWriter.append( + createProtoRows( + new String[] { + new String(new char[size]).replace('\u0000', (char) (r.nextInt(26) + 'a')) + }))); + } + for (int i = 0; i < 50; i++) { + assertFalse(responseList.get(i).get().hasError()); + } + 
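      // The payload appended above is a single random lowercase character repeated `size` times.
+      // Illustrative only, the same construction factored into a hypothetical helper:
+      //
+      //   private static String randomPayload(int size, Random r) {
+      //     char c = (char) ('a' + r.nextInt(26));
+      //     return new String(new char[size]).replace('\u0000', c);
+      //   }
+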
TableResult queryResult =
+          bigquery.query(
+              QueryJobConfiguration.newBuilder("SELECT count(*) from " + DATASET + '.' + tableName)
+                  .build());
+      Iterator<FieldValueList> queryIter = queryResult.getValues().iterator();
+      assertTrue(queryIter.hasNext());
+      assertEquals("50", queryIter.next().get(0).getStringValue());
+    }
+  }
+
+  // Tests that microsecond and picosecond timestamp inputs can be written to BQ via Arrow.
+  @Test
+  void timestamp_arrowWrite() throws IOException {
+    String tableName = "bqstorage_timestamp_write_arrow";
+    // Opt to create a new table to write to instead of re-using a table, to prevent the test
+    // from failing due to issues with deleting data after the test. This increases the test
+    // duration but is more resilient to transient failures.
+    createTimestampTable(tableName);
+
+    // Define the fields as Arrow types that are compatible with BQ Schema types
+    List<org.apache.arrow.vector.types.pojo.Field> fields =
+        ImmutableList.of(
+            new org.apache.arrow.vector.types.pojo.Field(
+                TIMESTAMP_COLUMN_NAME,
+                FieldType.nullable(
+                    new ArrowType.Timestamp(
+                        org.apache.arrow.vector.types.TimeUnit.MICROSECOND, "UTC")),
+                null),
+            new org.apache.arrow.vector.types.pojo.Field(
+                TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME,
+                FieldType.nullable(
+                    new ArrowType.Timestamp(
+                        org.apache.arrow.vector.types.TimeUnit.NANOSECOND, "UTC")),
+                null));
+    org.apache.arrow.vector.types.pojo.Schema arrowSchema =
+        new org.apache.arrow.vector.types.pojo.Schema(fields, null);
+
+    int numRows = INPUT_ARROW_WRITE_TIMESTAMPS.length;
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    try (StreamWriter streamWriter =
+        StreamWriter.newBuilder(parent.toString() + "/_default")
+            .setWriterSchema(arrowSchema)
+            .build()) {
+      try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) {
+        TimeStampMicroTZVector timestampVector =
+            (TimeStampMicroTZVector) root.getVector(TIMESTAMP_COLUMN_NAME);
+        TimeStampNanoTZVector timestampHigherPrecisionVector =
+            (TimeStampNanoTZVector) root.getVector(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME);
+        timestampVector.allocateNew(numRows);
+        timestampHigherPrecisionVector.allocateNew(numRows);
+
+        for (int i = 0; i < numRows; i++) {
+          timestampVector.set(i, (Long) INPUT_ARROW_WRITE_TIMESTAMPS[i][0]);
+          timestampHigherPrecisionVector.set(i, (Long) INPUT_ARROW_WRITE_TIMESTAMPS[i][1]);
+        }
+        root.setRowCount(numRows);
+
+        CompressionCodec codec =
+            NoCompressionCodec.Factory.INSTANCE.createCodec(
+                CompressionUtil.CodecType.NO_COMPRESSION);
+        VectorUnloader vectorUnloader =
+            new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true);
+        org.apache.arrow.vector.ipc.message.ArrowRecordBatch batch =
+            vectorUnloader.getRecordBatch();
+        // Asynchronous append.
+        ApiFuture<AppendRowsResponse> future = streamWriter.append(batch);
+        ApiFutures.addCallback(
+            future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor());
+      }
+    }
+    assertTimestamps(tableName, EXPECTED_ARROW_WRITE_TIMESTAMPS_ISO_OUTPUT);
+  }
+
+  // Tests that microsecond and picosecond timestamp inputs can be converted to protobuf
+  // and written to BQ.
+  @Test
+  void timestamp_protobufWrite()
+      throws IOException, DescriptorValidationException, InterruptedException {
+    String tableName = "bqstorage_timestamp_write_protobuf_schema_aware";
+    // Opt to create a new table to write to instead of re-using a table, to prevent the test
+    // from failing due to issues with deleting data after the test.
+    // This increases the test duration but is more resilient to transient failures.
+    createTimestampTable(tableName);
+
+    // Define the table schema so that the automatic converter is able to
+    // determine how to convert from Json -> Protobuf
+    TableFieldSchema testTimestamp =
+        TableFieldSchema.newBuilder()
+            .setName(TIMESTAMP_COLUMN_NAME)
+            .setType(TableFieldSchema.Type.TIMESTAMP)
+            .setMode(TableFieldSchema.Mode.NULLABLE)
+            .build();
+    TableFieldSchema testTimestampHighPrecision =
+        TableFieldSchema.newBuilder()
+            .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME)
+            .setTimestampPrecision(
+                Int64Value.newBuilder().setValue(Helper.PICOSECOND_PRECISION).build())
+            .setType(TableFieldSchema.Type.TIMESTAMP)
+            .setMode(TableFieldSchema.Mode.NULLABLE)
+            .build();
+    TableSchema tableSchema =
+        TableSchema.newBuilder()
+            .addFields(testTimestamp)
+            .addFields(testTimestampHighPrecision)
+            .build();
+
+    TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(parent.toString(), tableSchema).build()) {
+
+      // Creates a single payload to append (JsonArray with multiple JsonObjects)
+      // Each JsonObject contains a row (one micros, one picos)
+      JSONArray jsonArray = new JSONArray();
+      for (Object[] timestampData : Helper.INPUT_TIMESTAMPS) {
+        JSONObject row = new JSONObject();
+        row.put(TIMESTAMP_COLUMN_NAME, timestampData[0]);
+        row.put(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, timestampData[1]);
+        jsonArray.put(row);
+      }
+      ApiFuture<AppendRowsResponse> future = jsonStreamWriter.append(jsonArray);
+      ApiFutures.addCallback(
+          future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor());
+    }
+    assertTimestamps(tableName, EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT);
+  }
+
+  // Tests that users can write to BQ with a custom Protobuf message that carries seconds and
+  // a fractional part (picoseconds).
+  @Test
+  void timestamp_protobufWrite_customMessage_higherPrecision()
+      throws IOException, DescriptorValidationException {
+    String tableName = "bqstorage_timestamp_write_protobuf_custom_descriptor";
+    // Opt to create a new table to write to instead of re-using a table, to prevent the test
+    // from failing due to issues with deleting data after the test. This increases the test
+    // duration but is more resilient to transient failures.
+    createTimestampTable(tableName);
+
+    /*
+    A sample protobuf format:
+    message Wrapper {
+      message TimestampPicos {
+        int64 seconds = 1;
+        int64 picoseconds = 2;
+      }
+      TimestampPicos timestampHigherPrecision = 3;
+      // ...
+ } + */ + String wrapperProtoName = "Wrapper"; + String timestampPicosProtoName = "TimestampPicos"; + String secondsProtoName = "seconds"; + String picosProtoName = "picoseconds"; + DescriptorProto timestampPicosDescriptor = + DescriptorProto.newBuilder() + .setName(timestampPicosProtoName) + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(secondsProtoName) + .setNumber(1) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64) + .build()) + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(picosProtoName) + .setNumber(2) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64) + .build()) + .build(); + DescriptorProto wrapperDescriptor = + DescriptorProto.newBuilder() + .setName(wrapperProtoName) // random name + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME) + .setNumber(3) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE) + .setTypeName(timestampPicosDescriptor.getName()) + .build()) + .addNestedType(timestampPicosDescriptor) + .build(); + ProtoSchema protoSchema = + ProtoSchema.newBuilder().setProtoDescriptor(wrapperDescriptor).build(); + + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + try (StreamWriter streamWriter = + StreamWriter.newBuilder(parent.toString() + "/_default", writeClient) + .setWriterSchema(protoSchema) + .build()) { + DescriptorProtos.FileDescriptorProto fileProto = + DescriptorProtos.FileDescriptorProto.newBuilder() + .setName("test.proto") // dummy proto file + .addMessageType(wrapperDescriptor) + .build(); + + // Build the runtime descriptor (resolves types and names) + Descriptors.FileDescriptor file = + Descriptors.FileDescriptor.buildFrom(fileProto, new Descriptors.FileDescriptor[] {}); + + // Get the handle to the "wrapper" message type + Descriptors.Descriptor descriptor = file.findMessageTypeByName(wrapperProtoName); + + ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); + for (Long[] timestampParts : INPUT_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS) { + Message message = + DynamicMessage.newBuilder(descriptor) + .setField( + descriptor.findFieldByName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME), + DynamicMessage.newBuilder( + descriptor.findNestedTypeByName(timestampPicosProtoName)) + .setField( + descriptor + .findNestedTypeByName(timestampPicosProtoName) + .findFieldByName(secondsProtoName), + timestampParts[0]) + .setField( + descriptor + .findNestedTypeByName(timestampPicosProtoName) + .findFieldByName(picosProtoName), + timestampParts[1]) + .build()) + .build(); + rowsBuilder.addSerializedRows(message.toByteString()); + } + ApiFuture future = streamWriter.append(rowsBuilder.build()); + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } + String table = + BigQueryResource.formatTableResource( + ServiceOptions.getDefaultProjectId(), DATASET, tableName); + + // Read all the data as Avro GenericRecords + List rows = Helper.readAllRows(readClient, parentProjectId, table, null); + List timestampHigherPrecision = + rows.stream() + .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString()) + .collect(Collectors.toList()); + assertEquals( + EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT.length, + timestampHigherPrecision.size()); + for (int i = 0; + i < EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT.length; + i++) { + assertEquals( + 
EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT[i], + timestampHigherPrecision.get(i)); + } + } + + private void createTimestampTable(String tableName) { + Schema bqTableSchema = + Schema.of( + Field.newBuilder(TIMESTAMP_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP) + .setMode(Mode.NULLABLE) + .build(), + Field.newBuilder(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP) + .setMode(Mode.NULLABLE) + .setTimestampPrecision(Helper.PICOSECOND_PRECISION) + .build()); + + TableId testTableId = TableId.of(DATASET, tableName); + bigquery.create( + TableInfo.of( + testTableId, StandardTableDefinition.newBuilder().setSchema(bqTableSchema).build())); + } + + private void assertTimestamps(String tableName, Object[][] expected) throws IOException { + String table = + BigQueryResource.formatTableResource( + ServiceOptions.getDefaultProjectId(), DATASET, tableName); + + // Read all the data as Avro GenericRecords + List rows = Helper.readAllRows(readClient, parentProjectId, table, null); + + // Each timestamp response is expected to contain two fields: + // 1. Micros from timestamp as a Long and 2. ISO8601 instant with picos precision + List timestamps = + rows.stream().map(x -> (Long) x.get(TIMESTAMP_COLUMN_NAME)).collect(Collectors.toList()); + List timestampHigherPrecision = + rows.stream() + .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString()) + .collect(Collectors.toList()); + + assertEquals(expected.length, timestamps.size()); + assertEquals(expected.length, timestampHigherPrecision.size()); + for (int i = 0; i < timestampHigherPrecision.size(); i++) { + assertEquals(expected[i][0], timestamps.get(i)); + assertEquals(expected[i][1], timestampHigherPrecision.get(i)); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java new file mode 100644 index 000000000000..17322f33ca07 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java @@ -0,0 +1,227 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1.it; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +import com.google.api.core.ApiFuture; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field.Mode; +import com.google.cloud.bigquery.FieldValueList; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.CivilTimeEncoder; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableFieldSchema; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.TableSchema; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.protobuf.Descriptors; +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.Iterator; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +class ITBigQueryTimeEncoderTest { + private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); + private static final String TABLE = "testtable"; + private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset"; + + private static BigQueryWriteClient client; + private static TableInfo tableInfo; + private static BigQuery bigquery; + + @BeforeAll + static void beforeAll() throws IOException { + client = BigQueryWriteClient.create(); + + RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); + bigquery = bigqueryHelper.getOptions().getService(); + DatasetInfo datasetInfo = + DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build(); + bigquery.create(datasetInfo); + tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, TABLE), + StandardTableDefinition.of( + Schema.of( + com.google.cloud.bigquery.Field.newBuilder( + "test_str", StandardSQLTypeName.STRING) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test_time_micros", StandardSQLTypeName.TIME) + .setMode(Mode.REPEATED) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test_datetime_micros", StandardSQLTypeName.DATETIME) + .setMode(Mode.REPEATED) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test_date_repeated", StandardSQLTypeName.DATE) + .setMode(Mode.REPEATED) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "test_date", StandardSQLTypeName.DATE) + .setMode(Mode.NULLABLE) + .build()))) + .build(); + bigquery.create(tableInfo); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (client != null) { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + } + } + + @Test + void TestTimeEncoding() + throws IOException, + InterruptedException, + 
ExecutionException, + Descriptors.DescriptorValidationException { + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, TABLE); + TableFieldSchema TEST_STRING = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.STRING) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_str") + .build(); + TableFieldSchema TEST_TIME = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIME) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_time_micros") + .build(); + TableFieldSchema TEST_DATETIME = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_datetime_micros") + .build(); + TableFieldSchema TEST_DATE_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_date_repeated") + .build(); + TableFieldSchema TEST_DATE = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.DATE) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_date") + .build(); + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(0, TEST_STRING) + .addFields(1, TEST_TIME) + .addFields(2, TEST_DATETIME) + .addFields(3, TEST_DATE_REPEATED) + .addFields(4, TEST_DATE) + .build(); + try (JsonStreamWriter jsonStreamWriter = + JsonStreamWriter.newBuilder(parent.toString(), tableSchema).build()) { + JSONObject row = new JSONObject(); + row.put("test_str", "Start of the day"); + row.put( + "test_time_micros", + new JSONArray( + new long[] { + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime( + LocalTime.of(13, 14, 15, 16_000_000)), + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime( + LocalTime.of(23, 59, 59, 999_999_000)), + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(0, 0, 0, 0)), + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 2, 3, 4_000)), + CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(5, 6, 7, 8_000)) + })); + row.put( + "test_datetime_micros", + new JSONArray( + new long[] { + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.of(1, 1, 1, 12, 0, 0, 0)), + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.of(1995, 5, 19, 10, 30, 45, 0)), + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.of(2000, 1, 1, 0, 0, 0, 0)), + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.of(2026, 3, 11, 5, 45, 12, 9_000_000)), + CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( + LocalDateTime.of(2050, 1, 2, 3, 4, 5, 6_000)), + })); + row.put("test_date_repeated", new JSONArray(new int[] {0, 300, 14238})); + row.put("test_date", 300); + JSONArray jsonArr = new JSONArray(new JSONObject[] {row}); + ApiFuture response = jsonStreamWriter.append(jsonArr, -1); + assertFalse(response.get().getAppendResult().hasOffset()); + TableResult result = + bigquery.listTableData( + tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); + Iterator iter = result.getValues().iterator(); + FieldValueList currentRow; + currentRow = iter.next(); + assertEquals("Start of the day", currentRow.get(0).getValue()); + assertEquals("13:14:15.016000", currentRow.get(1).getRepeatedValue().get(0).getStringValue()); + assertEquals("23:59:59.999999", currentRow.get(1).getRepeatedValue().get(1).getStringValue()); + assertEquals("00:00:00", currentRow.get(1).getRepeatedValue().get(2).getStringValue()); + 
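      // These TIME strings come from CivilTimeEncoder's packed64 time-micros encoding; a minimal
+      // sketch of the packing, assuming an in-range LocalTime t and the encoder's documented bit
+      // layout | hour:5 | minute:6 | second:6 | micros:20 |:
+      //
+      //   long packed =
+      //       ((long) t.getHour() << 32)
+      //           | ((long) t.getMinute() << 26)
+      //           | ((long) t.getSecond() << 20)
+      //           | (t.getNano() / 1_000L);
+      //
+      // (The DATE values asserted below are plain epoch-day counts, e.g. 300 -> 1970-10-28.)
+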
assertEquals("01:02:03.000004", currentRow.get(1).getRepeatedValue().get(3).getStringValue()); + assertEquals("05:06:07.000008", currentRow.get(1).getRepeatedValue().get(4).getStringValue()); + + assertEquals( + "0001-01-01T12:00:00", currentRow.get(2).getRepeatedValue().get(0).getStringValue()); + assertEquals( + "1995-05-19T10:30:45", currentRow.get(2).getRepeatedValue().get(1).getStringValue()); + assertEquals( + "2000-01-01T00:00:00", currentRow.get(2).getRepeatedValue().get(2).getStringValue()); + + assertEquals("1970-01-01", currentRow.get(3).getRepeatedValue().get(0).getStringValue()); + assertEquals("1970-10-28", currentRow.get(3).getRepeatedValue().get(1).getStringValue()); + assertEquals("2008-12-25", currentRow.get(3).getRepeatedValue().get(2).getStringValue()); + + assertEquals("1970-10-28", currentRow.get(4).getStringValue()); + + assertEquals( + "2026-03-11T05:45:12.009000", + currentRow.get(2).getRepeatedValue().get(3).getStringValue()); + assertEquals( + "2050-01-02T03:04:05.000006", + currentRow.get(2).getRepeatedValue().get(4).getStringValue()); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java new file mode 100644 index 000000000000..46b1e59577c6 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java @@ -0,0 +1,203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.bigquery.storage.v1.it;
+
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+import com.google.api.core.ApiFuture;
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.DatasetId;
+import com.google.cloud.bigquery.DatasetInfo;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.LegacySQLTypeName;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.StandardTableDefinition;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import com.google.cloud.bigquery.storage.test.Test.FooType;
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
+import com.google.cloud.bigquery.storage.v1.ProtoRows;
+import com.google.cloud.bigquery.storage.v1.ProtoSchema;
+import com.google.cloud.bigquery.storage.v1.StreamWriter;
+import com.google.cloud.bigquery.storage.v1.TableName;
+import com.google.cloud.bigquery.storage.v1.WriteStream;
+import com.google.cloud.bigquery.storage.v1.it.util.WriteRetryTestUtil;
+import com.google.cloud.bigquery.testing.RemoteBigQueryHelper;
+import com.google.protobuf.DescriptorProtos.DescriptorProto;
+import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
+import com.google.protobuf.Descriptors.DescriptorValidationException;
+import io.grpc.Status.Code;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+/** Integration tests for BigQuery Write API. */
+class ITBigQueryWriteNonQuotaRetryTest {
+  private static final Logger LOG =
+      Logger.getLogger(ITBigQueryWriteNonQuotaRetryTest.class.getName());
+  private static final String DATASET = RemoteBigQueryHelper.generateDatasetName();
+  private static final String TABLE = "testtable";
+  private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset";
+  // This project is configured on the server to inject INTERNAL in-stream errors every
+  // 10 messages. This is done to verify in-stream message retries.
+ private static final String NON_QUOTA_RETRY_PROJECT_ID = "bq-write-api-java-retry-test"; + private static BigQueryWriteClient client; + private static BigQuery bigquery; + + @BeforeAll + static void beforeAll() throws IOException { + client = BigQueryWriteClient.create(); + + RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); + bigquery = bigqueryHelper.getOptions().getService(); + DatasetInfo datasetInfo = + DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build(); + bigquery.create(datasetInfo); + LOG.info("Created test dataset: " + DATASET); + TableInfo tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, TABLE), + StandardTableDefinition.of( + Schema.of( + Field.newBuilder("foo", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE) + .build()))) + .build(); + bigquery.create(tableInfo); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (client != null) { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + LOG.info("Deleted test dataset: " + DATASET); + } + } + + ProtoRows CreateProtoRows(String[] messages) { + ProtoRows.Builder rows = ProtoRows.newBuilder(); + for (String message : messages) { + FooType foo = FooType.newBuilder().setFoo(message).build(); + rows.addSerializedRows(foo.toByteString()); + } + return rows.build(); + } + + @Test + void testJsonStreamWriterCommittedStreamWithNonQuotaRetry() + throws IOException, InterruptedException, DescriptorValidationException { + WriteRetryTestUtil.runExclusiveRetryTest( + bigquery, + client, + DATASET, + NON_QUOTA_RETRY_PROJECT_ID, + WriteStream.Type.COMMITTED, + /* requestCount= */ 901, + /* rowBatchSize= */ 1); + } + + @Test + void testJsonStreamWriterDefaultStreamWithNonQuotaRetry() + throws IOException, InterruptedException, DescriptorValidationException { + WriteRetryTestUtil.runDefaultRetryTest( + bigquery, + client, + DATASET, + NON_QUOTA_RETRY_PROJECT_ID, + /* requestCount= */ 901, + /* rowBatchSize= */ 1); + } + + // Moved to ITBigQueryWriteNonQuotaRetryTest from ITBigQueryWriteClientTest, as it requires + // usage of the project this file uses to inject errors (bq-write-api-java-retry-test). 
+  @Test
+  void testDefaultRequestLimit() throws IOException, InterruptedException, ExecutionException {
+    DatasetId datasetId =
+        DatasetId.of(NON_QUOTA_RETRY_PROJECT_ID, RemoteBigQueryHelper.generateDatasetName());
+    DatasetInfo datasetInfo = DatasetInfo.newBuilder(datasetId).build();
+    bigquery.create(datasetInfo);
+    try {
+      String tableName = "no_error_table";
+      TableId tableId = TableId.of(datasetId.getProject(), datasetId.getDataset(), tableName);
+      Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
+      Schema originalSchema = Schema.of(col1);
+      TableInfo tableInfo =
+          TableInfo.newBuilder(tableId, StandardTableDefinition.of(originalSchema)).build();
+      bigquery.create(tableInfo);
+      ProtoSchema schema =
+          ProtoSchema.newBuilder()
+              .setProtoDescriptor(
+                  DescriptorProto.newBuilder()
+                      .setName("testProto")
+                      .addField(
+                          FieldDescriptorProto.newBuilder()
+                              .setName("col1")
+                              .setNumber(1)
+                              .setType(FieldDescriptorProto.Type.TYPE_STRING)
+                              .build())
+                      .build())
+              .build();
+      TableName parent = TableName.of(datasetId.getProject(), datasetId.getDataset(), tableName);
+      try (StreamWriter streamWriter =
+          StreamWriter.newBuilder(parent.toString() + "/_default")
+              .setWriterSchema(schema)
+              .build()) {
+        ApiFuture<AppendRowsResponse> response =
+            streamWriter.append(
+                CreateProtoRows(
+                    new String[] {new String(new char[19 * 1024 * 1024]).replace("\0", "a")}));
+        ExecutionException ex = assertThrows(ExecutionException.class, () -> response.get());
+        LOG.info(
+            "Message failed. Dataset info: "
+                + datasetInfo.toString()
+                + " tableinfo: "
+                + tableInfo.toString()
+                + " parent: "
+                + parent
+                + " streamWriter: "
+                + streamWriter);
+        assertEquals(io.grpc.StatusRuntimeException.class, ex.getCause().getClass());
+        io.grpc.StatusRuntimeException actualError = (io.grpc.StatusRuntimeException) ex.getCause();
+        // This verifies that the Beam connector can consume this custom exception's grpc
+        // StatusCode
+        // TODO(yiru): temp fix to unblock test, while final fix is being rolled out.
+        if (actualError.getStatus().getCode() != Code.INTERNAL) {
+          assertEquals(Code.INVALID_ARGUMENT, actualError.getStatus().getCode());
+          assertThat(actualError.getStatus().getDescription())
+              .contains("AppendRows request too large: 19923131 limit 10485760");
+        }
+      }
+    } finally {
+      RemoteBigQueryHelper.forceDelete(bigquery, datasetId.toString());
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java
new file mode 100644
index 000000000000..f082bda913cb
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1.it; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.cloud.bigquery.storage.v1.it.util.WriteRetryTestUtil; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +/** Integration tests for BigQuery Write API. */ +public class ITBigQueryWriteQuotaRetryTest { + private static final Logger LOG = Logger.getLogger(ITBigQueryWriteQuotaRetryTest.class.getName()); + private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); + private static final String TABLE = "testtable"; + private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset"; + // This project is configured on the server to inject RESOURCE_EXHAUSTED in-stream errors every + // 10 messages. This is done to verify in-stream message retries. + private static final String QUOTA_RETRY_PROJECT_ID = "bq-writeapi-java-quota-retry"; + private static BigQueryWriteClient client; + private static BigQuery bigquery; + + @BeforeAll + static void beforeAll() throws IOException { + client = BigQueryWriteClient.create(); + + RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); + bigquery = bigqueryHelper.getOptions().getService(); + DatasetInfo datasetInfo = + DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build(); + bigquery.create(datasetInfo); + LOG.info("Created test dataset: " + DATASET); + TableInfo tableInfo = + TableInfo.newBuilder( + TableId.of(DATASET, TABLE), + StandardTableDefinition.of( + Schema.of( + Field.newBuilder("foo", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE) + .build()))) + .build(); + bigquery.create(tableInfo); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (client != null) { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + LOG.info("Deleted test dataset: " + DATASET); + } + } + + @Test + void testJsonStreamWriterCommittedStreamWithQuotaRetry() + throws IOException, InterruptedException, DescriptorValidationException { + WriteRetryTestUtil.runExclusiveRetryTest( + bigquery, + client, + DATASET, + QUOTA_RETRY_PROJECT_ID, + WriteStream.Type.COMMITTED, + /* requestCount= */ 901, + /* rowBatchSize= */ 1); + } + + @Test + void testJsonStreamWriterDefaultStreamWithQuotaRetry() + throws IOException, InterruptedException, DescriptorValidationException { + WriteRetryTestUtil.runDefaultRetryTest( + bigquery, + client, + DATASET, + QUOTA_RETRY_PROJECT_ID, + /* requestCount= */ 901, + /* rowBatchSize= */ 1); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java
new file mode 100644
index 000000000000..5d3d1fbdc5df
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1.it.util;
+
+/** Test helper class to generate BigQuery resource paths. */
+public class BigQueryResource {
+
+  /**
+   * Formats the provided parameters into a BigQuery table resource path of the form
+   * projects/{projectId}/datasets/{datasetId}/tables/{tableId}.
+   *
+   * @param projectId the project that owns the table
+   * @param datasetId the dataset that contains the table
+   * @param tableId the table's ID
+   * @return a path to a table resource.
+   */
+  public static String formatTableResource(String projectId, String datasetId, String tableId) {
+    return String.format("projects/%s/datasets/%s/tables/%s", projectId, datasetId, tableId);
+  }
+}
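A quick usage illustration with placeholder IDs:

    String path = BigQueryResource.formatTableResource("my-project", "my_dataset", "my_table");
    // path == "projects/my-project/datasets/my_dataset/tables/my_table"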
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java
new file mode 100644
index 000000000000..75b172f67a8e
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1.it.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
+import com.google.api.core.ApiFutureCallback;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.auth.oauth2.ServiceAccountCredentials;
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.AvroSerializationOptions;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.util.Timestamps;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericRecordBuilder;
+
+public class Helper {
+
+  public static final long PICOSECOND_PRECISION = 12;
+  public static final String TIMESTAMP_COLUMN_NAME = "timestamp";
+  public static final String TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME = "timestampHigherPrecision";
+
+  // Sample test cases for timestamps. The first element is micros from epoch and the second
+  // element is the ISO format with picosecond precision.
+  public static final Object[][] INPUT_TIMESTAMPS =
+      new Object[][] {
+        {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789123Z"},
+        {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789123Z"},
+        {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789123Z"},
+        {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789123Z"}
+      };
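The micros half of each pair can be sanity-checked with java.time; note that java.time only carries nanosecond precision, so the trailing picosecond digits exist only in the ISO string form. A small sketch:

    import java.time.Instant;
    import java.time.temporal.ChronoUnit;

    long micros = 1735734896123456L; // first entry of INPUT_TIMESTAMPS
    Instant instant = Instant.EPOCH.plus(micros, ChronoUnit.MICROS);
    // Prints 2025-01-01T12:34:56.123456Z, the prefix of "2025-01-01T12:34:56.123456789123Z"
    System.out.println(instant);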
+  // Expected response for timestamps from the input. If ISO output is enabled, any
+  // picosecond-enabled column is returned in ISO 8601 format.
+  public static final Object[][] EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT =
+      new Object[][] {
+        {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789123Z"},
+        {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789123Z"},
+        {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789123Z"},
+        {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789123Z"}
+      };
+
+  public static ServiceAccountCredentials loadCredentials(String credentialFile) {
+    try (InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes())) {
+      return ServiceAccountCredentials.fromStream(keyStream);
+    } catch (IOException e) {
+      fail("Couldn't create fake JSON credentials.");
+    }
+    return null;
+  }
+
+  public static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+    private final Object lock = new Object();
+    private int batchCount = 0;
+
+    public void onSuccess(AppendRowsResponse response) {
+      synchronized (lock) {
+        if (response.hasError()) {
+          System.out.format("Error: %s\n", response.getError());
+        } else {
+          ++batchCount;
+          System.out.format("Wrote batch %d\n", batchCount);
+        }
+      }
+    }
+
+    public void onFailure(Throwable throwable) {
+      System.out.format("Error: %s\n", throwable.toString());
+    }
+  }
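A sketch of how this callback is typically attached to an append future (`writer` and `jsonRows` are assumed from surrounding test context; directExecutor keeps the example minimal):

    import com.google.api.core.ApiFutures;
    import com.google.common.util.concurrent.MoreExecutors;

    Helper.AppendCompleteCallback callback = new Helper.AppendCompleteCallback();
    ApiFutures.addCallback(
        writer.append(jsonRows), // an ApiFuture<AppendRowsResponse>
        callback,
        MoreExecutors.directExecutor());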

+  /**
+   * Reads all the rows from the specified table.
+   *
+   * <p>For every row, the consumer is called for processing.
+   *
+   * @param table the table resource path to read from.
+   * @param snapshotInMillis Optional. If specified, all rows up to timestamp will be returned.
+   * @param filter Optional. If specified, it will be used to restrict returned data.
+   * @param consumer that receives all Avro rows.
+   * @throws IOException
+   */
+  public static void processRowsAtSnapshot(
+      BigQueryReadClient client,
+      String parentProjectId,
+      String table,
+      Long snapshotInMillis,
+      String filter,
+      SimpleRowReaderAvro.AvroRowConsumer consumer)
+      throws IOException {
+    Preconditions.checkNotNull(table);
+    Preconditions.checkNotNull(consumer);
+
+    CreateReadSessionRequest.Builder createSessionRequestBuilder =
+        CreateReadSessionRequest.newBuilder()
+            .setParent(parentProjectId)
+            .setMaxStreamCount(1)
+            .setReadSession(
+                ReadSession.newBuilder()
+                    .setTable(table)
+                    .setDataFormat(DataFormat.AVRO)
+                    .setReadOptions(
+                        ReadSession.TableReadOptions.newBuilder()
+                            .setAvroSerializationOptions(
+                                AvroSerializationOptions.newBuilder()
+                                    .setPicosTimestampPrecision(
+                                        // This serialization option only impacts columns of type
+                                        // `TIMESTAMP_PICOS` and has no impact on other column
+                                        // types.
+                                        AvroSerializationOptions.PicosTimestampPrecision
+                                            .TIMESTAMP_PRECISION_PICOS)
+                                    .build())
+                            .build())
+                    .build());
+
+    if (snapshotInMillis != null) {
+      createSessionRequestBuilder
+          .getReadSessionBuilder()
+          .setTableModifiers(
+              ReadSession.TableModifiers.newBuilder()
+                  .setSnapshotTime(Timestamps.fromMillis(snapshotInMillis))
+                  .build());
+    }
+
+    if (filter != null && !filter.isEmpty()) {
+      createSessionRequestBuilder
+          .getReadSessionBuilder()
+          .setReadOptions(
+              ReadSession.TableReadOptions.newBuilder().setRowRestriction(filter).build());
+    }
+
+    ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+
+    SimpleRowReaderAvro reader =
+        new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      reader.processRows(response.getAvroRows(), consumer);
+    }
+  }
+
+  /**
+   * Reads all the rows from the specified table and returns them as a list of generic Avro
+   * records.
+   *
+   * @param table the table resource path to read from.
+   * @param filter Optional. If specified, it will be used to restrict returned data.
+   * @return the rows of the table as generic Avro records.
+   */
+  public static List<GenericData.Record> readAllRows(
+      BigQueryReadClient client, String parentProjectId, String table, String filter)
+      throws IOException {
+    final List<GenericData.Record> rows = new ArrayList<>();
+    processRowsAtSnapshot(
+        client,
+        parentProjectId,
+        /* table= */ table,
+        /* snapshotInMillis= */ null,
+        /* filter= */ filter,
+        (SimpleRowReaderAvro.AvroRowConsumer)
+            record -> {
+              // clone the record since that reference will be reused by the reader.
+ rows.add(new GenericRecordBuilder(record).build()); + }); + return rows; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java new file mode 100644 index 000000000000..f0dfcd5eb04c --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java @@ -0,0 +1,219 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.it.util; + +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.bigquery.FieldElementType; +import com.google.cloud.bigquery.Range; +import com.google.cloud.bigquery.storage.v1.ArrowRecordBatch; +import com.google.cloud.bigquery.storage.v1.ArrowSchema; +import com.google.common.base.Preconditions; +import java.io.IOException; +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.FieldVector; +import org.apache.arrow.vector.VectorLoader; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.complex.StructVector; +import org.apache.arrow.vector.ipc.ReadChannel; +import org.apache.arrow.vector.ipc.message.MessageSerializer; +import org.apache.arrow.vector.types.pojo.ArrowType; +import org.apache.arrow.vector.types.pojo.ArrowType.ArrowTypeID; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel; + +public class SimpleRowReaderArrow implements AutoCloseable { + + public interface ArrowBatchConsumer { + + /** Handler for every new Arrow batch. 
*/ + void accept(VectorSchemaRoot root); + } + + public static class ArrowTimestampBatchConsumer implements ArrowBatchConsumer { + private final Object[][] expectedTimestampValues; + + public ArrowTimestampBatchConsumer(Object[][] expectedTimestampValues) { + this.expectedTimestampValues = expectedTimestampValues; + } + + @Override + public void accept(VectorSchemaRoot root) { + FieldVector timestampFieldVector = root.getVector(TIMESTAMP_COLUMN_NAME); + FieldVector timestampHigherPrecisionFieldVector = + root.getVector(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME); + assertThat(timestampFieldVector.getValueCount()) + .isEqualTo(timestampHigherPrecisionFieldVector.getValueCount()); + int count = timestampFieldVector.getValueCount(); + for (int i = 0; i < count; i++) { + long timestampMicros = (Long) timestampFieldVector.getObject(i); + assertThat(timestampMicros).isEqualTo(expectedTimestampValues[i][0]); + + // The Object comes back as `Text` which cannot be cast to String + // (use `toString()` instead) + String timestampHigherPrecisionISO = + timestampHigherPrecisionFieldVector.getObject(i).toString(); + assertThat(timestampHigherPrecisionISO).isEqualTo(expectedTimestampValues[i][1]); + } + } + } + + /** ArrowRangeBatchConsumer accepts batch Arrow data and validate the range values. */ + public static class ArrowRangeBatchConsumer implements ArrowBatchConsumer { + private final Map expectedRangeDateValues; + private final Map expectedRangeDatetimeValues; + private final Map expectedRangeTimestampValues; + + public ArrowRangeBatchConsumer( + Map expectedRangeDateValues, + Map expectedRangeDatetimeValues, + Map expectedRangeTimestampValues) { + this.expectedRangeDateValues = expectedRangeDateValues; + this.expectedRangeDatetimeValues = expectedRangeDatetimeValues; + this.expectedRangeTimestampValues = expectedRangeTimestampValues; + } + + @Override + public void accept(VectorSchemaRoot root) { + StructVector dateVector = (StructVector) root.getVector("date"); + for (int i = 0; i < dateVector.valueCount; i++) { + Field field = root.getSchema().findField(dateVector.getName()); + assertThat(field.getType().getTypeID()).isEqualTo(ArrowTypeID.Struct); + + Map value = dateVector.getObject(i); + Range.Builder rangeBuilder = Range.newBuilder(); + if (value.get("start") != null) { + rangeBuilder.setStart(((Integer) value.get("start")).toString()); + } + if (value.get("end") != null) { + rangeBuilder.setEnd(((Integer) value.get("end")).toString()); + } + rangeBuilder.setType(toFieldElementType(field.getChildren().get(0))); + assertThat(rangeBuilder.build()).isIn(this.expectedRangeDateValues.values()); + } + + StructVector datetimeVector = (StructVector) root.getVector("datetime"); + for (int i = 0; i < datetimeVector.valueCount; i++) { + Field field = root.getSchema().findField(datetimeVector.getName()); + assertThat(field.getType().getTypeID()).isEqualTo(ArrowTypeID.Struct); + + Map value = datetimeVector.getObject(i); + Range.Builder rangeBuilder = Range.newBuilder(); + if (value.get("start") != null) { + rangeBuilder.setStart(((LocalDateTime) value.get("start")).toString()); + } + if (value.get("end") != null) { + rangeBuilder.setEnd(((LocalDateTime) value.get("end")).toString()); + } + rangeBuilder.setType(toFieldElementType(field.getChildren().get(0))); + assertThat(rangeBuilder.build()).isIn(this.expectedRangeDatetimeValues.values()); + } + + StructVector timestampVector = (StructVector) root.getVector("timestamp"); + for (int i = 0; i < timestampVector.valueCount; i++) { + Field field = 
root.getSchema().findField(timestampVector.getName()); + assertThat(field.getType().getTypeID()).isEqualTo(ArrowTypeID.Struct); + + Map value = timestampVector.getObject(i); + Range.Builder rangeBuilder = Range.newBuilder(); + if (value.get("start") != null) { + rangeBuilder.setStart(((Long) value.get("start")).toString()); + } + if (value.get("end") != null) { + rangeBuilder.setEnd(((Long) value.get("end")).toString()); + } + rangeBuilder.setType(toFieldElementType(field.getChildren().get(0))); + assertThat(rangeBuilder.build()).isIn(this.expectedRangeTimestampValues.values()); + } + } + + private static FieldElementType toFieldElementType(Field field) { + switch (field.getType().getTypeID()) { + case Date: + return FieldElementType.newBuilder().setType("DATE").build(); + case Timestamp: + String timezone = ((ArrowType.Timestamp) field.getType()).getTimezone(); + if (timezone == null) { + // Datetime fields do not have timezone value. + return FieldElementType.newBuilder().setType("DATETIME").build(); + } else { + return FieldElementType.newBuilder().setType("TIMESTAMP").build(); + } + default: + return null; + } + } + } + + BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE); + + // Decoder object will be reused to avoid re-allocation and too much garbage collection. + private final VectorSchemaRoot root; + private final VectorLoader loader; + + public SimpleRowReaderArrow(ArrowSchema arrowSchema) throws IOException { + org.apache.arrow.vector.types.pojo.Schema schema = + MessageSerializer.deserializeSchema( + new ReadChannel( + new ByteArrayReadableSeekableByteChannel( + arrowSchema.getSerializedSchema().toByteArray()))); + Preconditions.checkNotNull(schema); + List vectors = new ArrayList<>(); + for (org.apache.arrow.vector.types.pojo.Field field : schema.getFields()) { + vectors.add(field.createVector(allocator)); + } + root = new VectorSchemaRoot(vectors); + loader = new VectorLoader(root); + } + + /** + * Method for processing Arrow data which validates Range values. + * + * @param batch object returned from the ReadRowsResponse. + * @param batchConsumer consumer of the batch Arrow data. + */ + public void processRows(ArrowRecordBatch batch, ArrowBatchConsumer batchConsumer) + throws IOException { + org.apache.arrow.vector.ipc.message.ArrowRecordBatch deserializedBatch = + MessageSerializer.deserializeRecordBatch( + new ReadChannel( + new ByteArrayReadableSeekableByteChannel( + batch.getSerializedRecordBatch().toByteArray())), + allocator); + + loader.load(deserializedBatch); + // Release buffers from batch (they are still held in the vectors in root). + deserializedBatch.close(); + batchConsumer.accept(root); + + // Release buffers from vectors in root. + root.clear(); + } + + @Override + public void close() { + root.close(); + allocator.close(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java new file mode 100644 index 000000000000..1e8fc4575ed6 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java @@ -0,0 +1,77 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
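For context, a sketch of how SimpleRowReaderArrow is typically driven from a read session created with DataFormat.ARROW (`client`, `session`, and `request` are assumed to be set up as in Helper.processRowsAtSnapshot above):

    try (SimpleRowReaderArrow reader = new SimpleRowReaderArrow(session.getArrowSchema())) {
      ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(request);
      for (ReadRowsResponse response : stream) {
        // Each batch is loaded into the shared VectorSchemaRoot, handed to the
        // consumer, then cleared before the next batch arrives.
        reader.processRows(
            response.getArrowRecordBatch(), root -> System.out.println(root.getRowCount()));
      }
    }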
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1.it.util;
+
+import com.google.cloud.bigquery.storage.v1.AvroRows;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.io.BinaryDecoder;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.DecoderFactory;
+
+/*
+ * SimpleRowReaderAvro handles deserialization of the Avro-encoded row blocks transmitted
+ * from the storage API using a generic datum decoder.
+ */
+public class SimpleRowReaderAvro {
+
+  public interface AvroRowConsumer {
+
+    /**
+     * Handler for every new Avro row that is read.
+     *
+     * @param record is Avro generic record structure. Consumers should not rely on the reference
+     *     and should copy it if needed. The record reference is reused.
+     */
+    void accept(GenericData.Record record);
+  }
+
+  private final DatumReader<GenericData.Record> datumReader;
+
+  // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+  private BinaryDecoder decoder = null;
+
+  // Record object will be reused.
+  private GenericData.Record row = null;
+
+  public SimpleRowReaderAvro(Schema schema) {
+    Preconditions.checkNotNull(schema);
+    datumReader = new GenericDatumReader<>(schema);
+  }
+
+  /**
+   * Processes Avro rows by calling a consumer for each decoded row.
+   *
+   * @param avroRows object returned from the ReadRowsResponse.
+   * @param rowConsumer consumer that accepts GenericRecord.
+   */
+  public void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException {
+    Preconditions.checkNotNull(avroRows);
+    Preconditions.checkNotNull(rowConsumer);
+    decoder =
+        DecoderFactory.get()
+            .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), decoder);
+
+    while (!decoder.isEnd()) {
+      row = datumReader.read(row, decoder);
+      rowConsumer.accept(row);
+    }
+  }
+}
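A sketch of driving this Avro reader from a read session created with DataFormat.AVRO (`client`, `session`, and `request` as in Helper.processRowsAtSnapshot; the record must be copied if retained, since the reader reuses it):

    SimpleRowReaderAvro reader =
        new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
    for (ReadRowsResponse response : client.readRowsCallable().call(request)) {
      reader.processRows(
          response.getAvroRows(),
          record -> System.out.println(record)); // copy with GenericRecordBuilder if kept
    }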
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java
new file mode 100644
index 000000000000..5bfc97149a5b
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1.it.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.StandardTableDefinition;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
+import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest;
+import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
+import com.google.cloud.bigquery.storage.v1.TableFieldSchema;
+import com.google.cloud.bigquery.storage.v1.TableName;
+import com.google.cloud.bigquery.storage.v1.TableSchema;
+import com.google.cloud.bigquery.storage.v1.WriteStream;
+import com.google.protobuf.Descriptors.DescriptorValidationException;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.concurrent.ExecutionException;
+import java.util.logging.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+public class WriteRetryTestUtil {
+  private static final Logger LOG =
+      Logger.getLogger(
+          com.google.cloud.bigquery.storage.v1.it.ITBigQueryWriteQuotaRetryTest.class.getName());
+
+  public static void runExclusiveRetryTest(
+      BigQuery bigquery,
+      BigQueryWriteClient client,
+      String dataset,
+      String projectId,
+      WriteStream.Type streamType,
+      int requestCount,
+      int rowBatchSize)
+      throws IOException, InterruptedException, DescriptorValidationException {
+    RetrySettings retrySettings =
+        RetrySettings.newBuilder()
+            .setInitialRetryDelayDuration(Duration.ofMillis(500))
+            .setRetryDelayMultiplier(1.1)
+            .setMaxAttempts(5)
+            .setMaxRetryDelayDuration(Duration.ofMinutes(1))
+            .build();
+    String tableName = "RetryTest";
+    TableId tableId = TableId.of(dataset, tableName);
+    Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
+    Schema schema = Schema.of(col1);
+    TableInfo tableInfo = TableInfo.newBuilder(tableId, StandardTableDefinition.of(schema)).build();
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(projectId, dataset, tableName);
+
+    WriteStream writeStream =
+        client.createWriteStream(
+            CreateWriteStreamRequest.newBuilder()
+                .setParent(parent.toString())
+                .setWriteStream(WriteStream.newBuilder().setType(streamType).build())
+                .build());
+    ArrayList<ApiFuture<AppendRowsResponse>> allResponses = new ArrayList<>(requestCount);
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema())
+            .setRetrySettings(retrySettings)
+            .build()) {
+      for (int k = 0; k < requestCount; k++) {
+        JSONObject row = new JSONObject();
+        row.put("col1", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+        JSONArray jsonArr = new JSONArray();
+        // 3MB batch.
+        for (int j = 0; j < rowBatchSize; j++) {
+          jsonArr.put(row);
+        }
+        LOG.info("Appending: " + k + "/" + requestCount);
+        allResponses.add(jsonStreamWriter.append(jsonArr, k * rowBatchSize));
+      }
+      LOG.info("Waiting for all responses to come back");
+      for (int i = 0; i < requestCount; i++) {
+        LOG.info("Waiting for request " + i);
+        try {
+          assertEquals(
+              i * rowBatchSize,
+              allResponses.get(i).get().getAppendResult().getOffset().getValue());
+        } catch (ExecutionException ex) {
+          fail("Unexpected error " + ex);
+        }
+      }
+    }
+  }
+
+  private static void runDefaultRetryTestInternal(
+      BigQuery bigquery,
+      BigQueryWriteClient client,
+      String dataset,
+      String projectId,
+      int requestCount,
+      int rowBatchSize,
+      TableName parent,
+      TableSchema tableSchema,
+      boolean enableConnectionPool)
+      throws IOException, InterruptedException, DescriptorValidationException {
+    RetrySettings retrySettings =
+        RetrySettings.newBuilder()
+            .setInitialRetryDelayDuration(Duration.ofMillis(500))
+            .setRetryDelayMultiplier(1.1)
+            .setMaxAttempts(5)
+            .setMaxRetryDelayDuration(Duration.ofMinutes(1))
+            .build();
+    ArrayList<ApiFuture<AppendRowsResponse>> allResponses = new ArrayList<>(requestCount);
+    try (JsonStreamWriter jsonStreamWriter =
+        JsonStreamWriter.newBuilder(parent.toString(), tableSchema)
+            .setIgnoreUnknownFields(true)
+            .setRetrySettings(retrySettings)
+            .setEnableConnectionPool(enableConnectionPool)
+            .build()) {
+      for (int k = 0; k < requestCount; k++) {
+        JSONObject row = new JSONObject();
+        row.put("test_str", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+        JSONArray jsonArr = new JSONArray();
+        // 3MB batch.
+        for (int j = 0; j < rowBatchSize; j++) {
+          jsonArr.put(row);
+        }
+        LOG.info("Appending: " + k + "/" + requestCount);
+        allResponses.add(jsonStreamWriter.append(jsonArr));
+      }
+      LOG.info("Waiting for all responses to come back");
+      for (int i = 0; i < requestCount; i++) {
+        LOG.info("Waiting for request " + i);
+        try {
+          assertFalse(allResponses.get(i).get().hasError());
+        } catch (Exception ex) {
+          fail("Unexpected error " + ex);
+        }
+      }
+    }
+  }
+
+  public static void runDefaultRetryTest(
+      BigQuery bigquery,
+      BigQueryWriteClient client,
+      String dataset,
+      String projectId,
+      int requestCount,
+      int rowBatchSize)
+      throws IOException, InterruptedException, DescriptorValidationException {
+    String tableName = "JsonTableDefaultStream";
+    TableFieldSchema TEST_STRING =
+        TableFieldSchema.newBuilder()
+            .setType(TableFieldSchema.Type.STRING)
+            .setMode(TableFieldSchema.Mode.NULLABLE)
+            .setName("test_str")
+            .build();
+    TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_STRING).build();
+    TableInfo tableInfo =
+        TableInfo.newBuilder(
+                TableId.of(dataset, tableName),
+                StandardTableDefinition.of(
+                    Schema.of(Field.newBuilder("test_str", StandardSQLTypeName.STRING).build())))
+            .build();
+
+    bigquery.create(tableInfo);
+    TableName parent = TableName.of(projectId, dataset, tableName);
+    runDefaultRetryTestInternal(
+        bigquery,
+        client,
+        dataset,
+        projectId,
+        requestCount,
+        rowBatchSize,
+        parent,
+        tableSchema,
+        /* enableConnectionPool= */ false);
+    runDefaultRetryTestInternal(
+        bigquery,
+        client,
+        dataset,
+        projectId,
+        requestCount,
+        rowBatchSize,
+        parent,
+        tableSchema,
+        /* enableConnectionPool= */ true);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java
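Taken together, the utility above boils down to this append-and-retry pattern (condensed sketch; `parentOrStreamName` and `tableSchema` are assumed from context, and the delay/attempt values are test tuning, not recommendations):

    RetrySettings retrySettings =
        RetrySettings.newBuilder()
            .setInitialRetryDelayDuration(Duration.ofMillis(500))
            .setRetryDelayMultiplier(1.1)
            .setMaxAttempts(5)
            .setMaxRetryDelayDuration(Duration.ofMinutes(1))
            .build();
    try (JsonStreamWriter writer =
        JsonStreamWriter.newBuilder(parentOrStreamName, tableSchema)
            .setRetrySettings(retrySettings) // retries injected RESOURCE_EXHAUSTED in-stream errors
            .build()) {
      ApiFuture<AppendRowsResponse> future =
          writer.append(new JSONArray().put(new JSONObject().put("test_str", "a")));
      future.get(); // fails only once retries are exhausted
    }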
b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java new file mode 100644 index 000000000000..00fcd82a5b0b --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1.stub; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.WatchdogProvider; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; +import java.time.Duration; +import java.util.Set; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class EnhancedBigQueryReadStubSettingsTest { + + @Test + void testSettingsArePreserved() { + String endpoint = "some.other.host:123"; + CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class); + Duration watchdogInterval = Duration.ofSeconds(12); + WatchdogProvider watchdogProvider = Mockito.mock(WatchdogProvider.class); + + EnhancedBigQueryReadStubSettings.Builder builder = + EnhancedBigQueryReadStubSettings.newBuilder() + .setEndpoint(endpoint) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogCheckIntervalDuration(watchdogInterval) + .setStreamWatchdogProvider(watchdogProvider); + + verifyBuilder(builder, endpoint, credentialsProvider, watchdogInterval, watchdogProvider); + + verifySettings( + builder.build(), endpoint, credentialsProvider, watchdogInterval, watchdogProvider); + + verifyBuilder( + builder.build().toBuilder(), + endpoint, + credentialsProvider, + watchdogInterval, + watchdogProvider); + } + + private void verifyBuilder( + EnhancedBigQueryReadStubSettings.Builder builder, + String endpoint, + CredentialsProvider credentialsProvider, + Duration watchdogInterval, + WatchdogProvider watchdogProvider) { + assertThat(builder.getEndpoint()).isEqualTo(endpoint); + assertThat(builder.getCredentialsProvider()).isEqualTo(credentialsProvider); + assertThat(builder.getStreamWatchdogCheckIntervalDuration()).isEqualTo(watchdogInterval); + assertThat(builder.getStreamWatchdogProvider()).isEqualTo(watchdogProvider); + + InstantiatingGrpcChannelProvider channelProvider = + 
(InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider(); + assertThat(channelProvider.toBuilder().getMaxInboundMessageSize()).isEqualTo(Integer.MAX_VALUE); + } + + private void verifySettings( + EnhancedBigQueryReadStubSettings settings, + String endpoint, + CredentialsProvider credentialsProvider, + Duration watchdogInterval, + WatchdogProvider watchdogProvider) { + assertThat(settings.getEndpoint()).isEqualTo(endpoint); + assertThat(settings.getCredentialsProvider()).isEqualTo(credentialsProvider); + assertThat(settings.getStreamWatchdogCheckIntervalDuration()).isEqualTo(watchdogInterval); + assertThat(settings.getStreamWatchdogProvider()).isEqualTo(watchdogProvider); + + InstantiatingGrpcChannelProvider channelProvider = + (InstantiatingGrpcChannelProvider) settings.getTransportChannelProvider(); + assertThat(channelProvider.toBuilder().getMaxInboundMessageSize()).isEqualTo(Integer.MAX_VALUE); + } + + @Test + void testCreateReadSessionSettings() { + UnaryCallSettings.Builder builder = + EnhancedBigQueryReadStubSettings.newBuilder().createReadSessionSettings(); + verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); + } + + @Test + void testReadRowsSettings() { + ServerStreamingCallSettings.Builder builder = + EnhancedBigQueryReadStubSettings.newBuilder().readRowsSettings(); + assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE); + RetrySettings retrySettings = builder.getRetrySettings(); + assertThat(retrySettings.getInitialRetryDelayDuration()).isEqualTo(Duration.ofMillis(100L)); + assertThat(retrySettings.getRetryDelayMultiplier()).isWithin(1e-6).of(1.3); + assertThat(retrySettings.getMaxRetryDelayDuration()).isEqualTo(Duration.ofMinutes(1L)); + assertThat(retrySettings.getInitialRpcTimeoutDuration()).isEqualTo(Duration.ofDays(1L)); + assertThat(retrySettings.getRpcTimeoutMultiplier()).isWithin(1e-6).of(1.0); + assertThat(retrySettings.getMaxRpcTimeoutDuration()).isEqualTo(Duration.ofDays(1L)); + assertThat(retrySettings.getTotalTimeoutDuration()).isEqualTo(Duration.ofDays(1L)); + assertThat(builder.getIdleTimeoutDuration()).isEqualTo(Duration.ZERO); + } + + @Test + void testSplitReadStreamSettings() { + UnaryCallSettings.Builder builder = + EnhancedBigQueryReadStubSettings.newBuilder().splitReadStreamSettings(); + verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); + } + + private void verifyRetrySettings(Set retryCodes, RetrySettings retrySettings) { + assertThat(retryCodes).contains(Code.UNAVAILABLE); + assertThat(retrySettings.getTotalTimeoutDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getInitialRetryDelayDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getRetryDelayMultiplier()).isAtLeast(1.0); + assertThat(retrySettings.getMaxRetryDelayDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getInitialRpcTimeoutDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getRpcTimeoutMultiplier()).isAtLeast(1.0); + assertThat(retrySettings.getMaxRpcTimeoutDuration()).isGreaterThan(Duration.ZERO); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java new file mode 100644 index 000000000000..5d1112e0dc43 --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java @@ -0,0 +1,141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1.stub; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.api.gax.rpc.UnimplementedException; +import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1.BigQueryReadGrpc.BigQueryReadImplBase; +import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class ResourceHeaderTest { + + private static final String TEST_TABLE_REFERENCE = + "projects/project/datasets/dataset/tables/table"; + + private static final String TEST_STREAM_NAME = "streamName"; + + private static final String NAME = "resource-header-test:123-v1"; + + private static final String HEADER_NAME = "x-goog-request-params"; + + private static final Pattern READ_SESSION_NAME_PATTERN = + Pattern.compile( + ".*" + + "read_session\\.table=projects%2Fproject%2Fdatasets%2Fdataset%2Ftables%2Ftable" + + ".*"); + private static final Pattern READ_STREAM_PATTERN = + Pattern.compile(".*" + "read_stream=streamName" + ".*"); + private static final Pattern STREAM_NAME_PATTERN = + Pattern.compile(".*" + "name=streamName" + ".*"); + + private static final String TEST_HEADER_NAME = "simple-header-name"; + private static final String TEST_HEADER_VALUE = "simple-header-value"; + private static final Pattern TEST_PATTERN = Pattern.compile(".*" + TEST_HEADER_VALUE + ".*"); + + private static InProcessServer server; + + private LocalChannelProvider channelProvider; + private BigQueryReadClient client; + + @BeforeAll + public static void setUpClass() throws Exception { + server = new InProcessServer<>(new BigQueryReadImplBase() {}, NAME); + server.start(); + } + + @BeforeEach + void setUp() throws Exception { + channelProvider = LocalChannelProvider.create(NAME); + BigQueryReadSettings.Builder settingsBuilder = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setHeaderProvider(FixedHeaderProvider.create(TEST_HEADER_NAME, TEST_HEADER_VALUE)) + .setTransportChannelProvider(channelProvider); + client = 
BigQueryReadClient.create(settingsBuilder.build()); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + @AfterAll + static void tearDownClass() throws Exception { + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void createReadSessionTest() { + try { + client.createReadSession( + "parents/project", ReadSession.newBuilder().setTable(TEST_TABLE_REFERENCE).build(), 1); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + verifyHeaderSent(READ_SESSION_NAME_PATTERN); + } + + @Test + void readRowsTest() { + try { + ReadRowsRequest request = + ReadRowsRequest.newBuilder().setReadStream(TEST_STREAM_NAME).setOffset(125).build(); + client.readRowsCallable().call(request); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(READ_STREAM_PATTERN); + } + + @Test + void splitReadStreamTest() { + try { + client.splitReadStream(SplitReadStreamRequest.newBuilder().setName(TEST_STREAM_NAME).build()); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(STREAM_NAME_PATTERN); + } + + private void verifyHeaderSent(Pattern... patterns) { + for (Pattern pattern : patterns) { + boolean headerSent = channelProvider.isHeaderSent(HEADER_NAME, pattern); + assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + } + boolean testHeaderSent = channelProvider.isHeaderSent(TEST_HEADER_NAME, TEST_PATTERN); + assertWithMessage("Provided header was sent").that(testHeaderSent).isTrue(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java new file mode 100644 index 000000000000..aaecc6922786 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
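The same FixedHeaderProvider mechanism the test wires up is available to regular clients; a sketch of attaching a custom header outside of tests (header name and value are placeholders):

    import com.google.api.gax.rpc.FixedHeaderProvider;
    import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
    import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings;

    BigQueryReadSettings settings =
        BigQueryReadSettings.newBuilder()
            .setHeaderProvider(
                FixedHeaderProvider.create("simple-header-name", "simple-header-value"))
            .build();
    try (BigQueryReadClient client = BigQueryReadClient.create(settings)) {
      // Every RPC now carries the fixed header alongside the generated
      // x-goog-request-params routing header asserted above.
    }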
+ */ +package com.google.cloud.bigquery.storage.v1.stub; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.api.gax.rpc.UnimplementedException; +import com.google.cloud.bigquery.storage.v1.*; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteGrpc.BigQueryWriteImplBase; +import java.util.regex.Pattern; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class WriteHeaderTest { + + private static final String TEST_TABLE_REFERENCE = + "projects/project/datasets/dataset/tables/table"; + + private static final String TEST_STREAM_NAME = "streamName"; + + private static final String NAME = "write-header-test:456"; + + private static final String TEST_HEADER_NAME = "simple-header-name"; + private static final String TEST_HEADER_VALUE = "simple-header-value"; + private static final Pattern TEST_PATTERN = Pattern.compile(".*" + TEST_HEADER_VALUE + ".*"); + private static final String USER_AGENT_HEADER_NAME = "User-Agent"; + private static final String USER_AGENT_HEADER_VALUE = "justChecking"; + private static final Pattern USER_AGENT_PATTERN = + Pattern.compile(".*" + USER_AGENT_HEADER_VALUE + ".*"); + + private static InProcessServer server; + + private LocalChannelProvider channelProvider; + private BigQueryWriteClient client; + + @BeforeAll + public static void setUpClass() throws Exception { + server = new InProcessServer<>(new BigQueryWriteImplBase() {}, NAME); + server.start(); + } + + @BeforeEach + void setUp() throws Exception { + channelProvider = LocalChannelProvider.create(NAME); + BigQueryWriteSettings.Builder settingsBuilder = + BigQueryWriteSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setHeaderProvider( + FixedHeaderProvider.create( + TEST_HEADER_NAME, + TEST_HEADER_VALUE, + USER_AGENT_HEADER_NAME, + USER_AGENT_HEADER_VALUE)) + .setTransportChannelProvider(channelProvider); + client = BigQueryWriteClient.create(settingsBuilder.build()); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + } + + @AfterAll + static void tearDownClass() throws Exception { + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void createWriteStreamTest() { + CreateWriteStreamRequest request = + CreateWriteStreamRequest.newBuilder() + .setParent(TEST_TABLE_REFERENCE) + .setWriteStream(WriteStream.newBuilder().build()) + .build(); + try { + client.createWriteStream(request); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. 
+    }
+    verifyWriteHeaderSent();
+  }
+
+  @Test
+  void writeRowsTest() {
+    BidiStreamingCallable<AppendRowsRequest, AppendRowsResponse> callable =
+        client.appendRowsCallable();
+    ApiCallContext apiCallContext = null;
+    ClientStream<AppendRowsRequest> clientStream =
+        callable.splitCall(
+            new ResponseObserver<AppendRowsResponse>() {
+              @Override
+              public void onStart(StreamController controller) {}
+
+              @Override
+              public void onResponse(AppendRowsResponse response) {}
+
+              @Override
+              public void onError(Throwable t) {}
+
+              @Override
+              public void onComplete() {}
+            },
+            apiCallContext);
+
+    AppendRowsRequest request =
+        AppendRowsRequest.newBuilder().setWriteStream(TEST_STREAM_NAME).build();
+    try {
+      clientStream.send(request);
+    } catch (UnimplementedException e) {
+      // Ignore the error: none of the methods are actually implemented.
+    }
+    verifyWriteHeaderSent();
+  }
+
+  private void verifyWriteHeaderSent() {
+    boolean testHeaderSent = channelProvider.isHeaderSent(TEST_HEADER_NAME, TEST_PATTERN);
+    assertWithMessage("Test header was sent").that(testHeaderSent).isTrue();
+    boolean userAgentHeaderSent =
+        channelProvider.isHeaderSent(USER_AGENT_HEADER_NAME, USER_AGENT_PATTERN);
+    assertWithMessage("User Agent header was sent").that(userAgentHeaderSent).isTrue();
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java
new file mode 100644
index 000000000000..10271438ce90
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
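The no-op observer in writeRowsTest exists only to open the stream for header inspection; a sketch of what a consuming observer typically looks like (illustrative, not part of the patch):

    ResponseObserver<AppendRowsResponse> observer =
        new ResponseObserver<AppendRowsResponse>() {
          @Override
          public void onStart(StreamController controller) {
            // Flow-control hooks (e.g. controller.disableAutoInboundFlowControl()) go here.
          }

          @Override
          public void onResponse(AppendRowsResponse response) {
            // For a committed stream, offsets arrive in append order.
            System.out.println(response.getAppendResult().getOffset().getValue());
          }

          @Override
          public void onError(Throwable t) {
            t.printStackTrace();
          }

          @Override
          public void onComplete() {}
        };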
+ */ +package com.google.cloud.bigquery.storage.v1.stub.readrows; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1.BigQueryReadGrpc.BigQueryReadImplBase; +import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.common.collect.Queues; +import io.grpc.Status.Code; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.List; +import java.util.Queue; +import java.util.UUID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class ReadRowsRetryTest { + + private TestBigQueryStorageService service; + private BigQueryReadClient client; + private InProcessServer server; + + @BeforeEach + void setUp() throws Exception { + service = new TestBigQueryStorageService(); + String serverName = UUID.randomUUID().toString(); + server = new InProcessServer<>(service, serverName); + server.start(); + + BigQueryReadSettings settings = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setTransportChannelProvider(LocalChannelProvider.create(serverName)) + .build(); + + client = BigQueryReadClient.create(settings); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void happyPathTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7)); + + assertEquals(17, getRowCount(request)); + } + + @Test + void immediateRetryTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7)); + + assertEquals(17, getRowCount(request)); + } + + @Test + void multipleRetryTestWithZeroInitialOffset() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(5) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 5) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6)); + + assertEquals(28, getRowCount(request)); + } + + @Test + void multipleRetryTestWithNonZeroInitialOffset() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 17) + .respondWithNumberOfRows(5) + 
+            .respondWithStatus(Code.UNAVAILABLE));
+
+    service.expectations.add(
+        RpcExpectation.create()
+            .expectRequest("fake-stream", 22)
+            .respondWithNumberOfRows(10)
+            .respondWithNumberOfRows(7)
+            .respondWithStatus(Code.UNAVAILABLE));
+
+    service.expectations.add(
+        RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3));
+
+    assertEquals(25, getRowCount(request));
+  }
+
+  @Test
+  void errorAtTheVeryEndTest() {
+    ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
+    service.expectations.add(
+        RpcExpectation.create()
+            .expectRequest("fake-stream", 0)
+            .respondWithNumberOfRows(10)
+            .respondWithNumberOfRows(7)
+            .respondWithStatus(Code.UNAVAILABLE));
+
+    service.expectations.add(
+        RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0));
+
+    assertEquals(17, getRowCount(request));
+  }
+
+  private int getRowCount(ReadRowsRequest request) {
+    ServerStream<ReadRowsResponse> serverStream = client.readRowsCallable().call(request);
+    int rowCount = 0;
+    for (ReadRowsResponse readRowsResponse : serverStream) {
+      rowCount += readRowsResponse.getRowCount();
+    }
+    return rowCount;
+  }
+
+  private static class TestBigQueryStorageService extends BigQueryReadImplBase {
+
+    Queue<RpcExpectation> expectations = Queues.newArrayDeque();
+    int currentRequestIndex = -1;
+
+    @Override
+    public void readRows(
+        ReadRowsRequest request, StreamObserver<ReadRowsResponse> responseObserver) {
+
+      RpcExpectation expectedRpc = expectations.poll();
+      currentRequestIndex++;
+
+      assertNotNull(
+          expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString());
+      assertEquals(
+          expectedRpc.expectedRequest,
+          request,
+          "Expected request #"
+              + currentRequestIndex
+              + " does not match actual request: "
+              + request.toString());
+      for (ReadRowsResponse response : expectedRpc.responses) {
+        responseObserver.onNext(response);
+      }
+
+      if (expectedRpc.statusCode.toStatus().isOk()) {
+        responseObserver.onCompleted();
+      } else {
+        responseObserver.onError(expectedRpc.statusCode.toStatus().asRuntimeException());
+      }
+    }
+  }
+
+  private static class RpcExpectation {
+
+    ReadRowsRequest expectedRequest;
+    Code statusCode;
+    List<ReadRowsResponse> responses;
+
+    private RpcExpectation() {
+      statusCode = Code.OK;
+      responses = new ArrayList<>();
+    }
+
+    static RpcExpectation create() {
+      return new RpcExpectation();
+    }
+
+    static ReadRowsRequest createRequest(String streamName, long offset) {
+      return ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(offset).build();
+    }
+
+    static ReadRowsResponse createResponse(int numberOfRows) {
+      return ReadRowsResponse.newBuilder().setRowCount(numberOfRows).build();
+    }
+
+    RpcExpectation expectRequest(String streamName, long offset) {
+      expectedRequest = createRequest(streamName, offset);
+      return this;
+    }
+
+    RpcExpectation respondWithNumberOfRows(int numberOfRows) {
+      responses.add(createResponse(numberOfRows));
+      return this;
+    }
+
+    RpcExpectation respondWithStatus(Code code) {
+      this.statusCode = code;
+      return this;
+    }
+  }
+}
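The offsets in the expectations above follow a simple invariant that the retrying read-rows callable maintains; roughly (illustrative sketch, `streamName`, `initialOffset`, and `rowsAlreadyReceived` assumed from context):

    // After a mid-stream failure, the stream is reopened at
    //   resumeOffset = initialOffset + rowsAlreadyReceived
    // e.g. in multipleRetryTestWithNonZeroInitialOffset: 17 + 5 = 22, then 22 + 17 = 39.
    long resumeOffset = initialOffset + rowsAlreadyReceived;
    ReadRowsRequest resume =
        ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(resumeOffset).build();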
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java
new file mode 100644
index 000000000000..87547df18c6a
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1alpha;
+
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.api.gax.grpc.GaxGrpcProperties;
+import com.google.api.gax.grpc.testing.LocalChannelProvider;
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.api.gax.grpc.testing.MockServiceHelper;
+import com.google.api.gax.grpc.testing.MockStreamObserver;
+import com.google.api.gax.rpc.ApiClientHeaderProvider;
+import com.google.api.gax.rpc.ApiStreamObserver;
+import com.google.api.gax.rpc.BidiStreamingCallable;
+import com.google.api.gax.rpc.InvalidArgumentException;
+import com.google.api.gax.rpc.StatusCode;
+import com.google.protobuf.AbstractMessage;
+import com.google.protobuf.Empty;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import javax.annotation.Generated;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Generated("by gapic-generator-java")
+public class MetastorePartitionServiceClientTest {
+  private static MockMetastorePartitionService mockMetastorePartitionService;
+  private static MockServiceHelper mockServiceHelper;
+  private LocalChannelProvider channelProvider;
+  private MetastorePartitionServiceClient client;
+
+  @BeforeClass
+  public static void startStaticServer() {
+    mockMetastorePartitionService = new MockMetastorePartitionService();
+    mockServiceHelper =
+        new MockServiceHelper(
+            UUID.randomUUID().toString(),
+            Arrays.<MockGrpcService>asList(mockMetastorePartitionService));
+    mockServiceHelper.start();
+  }
+
+  @AfterClass
+  public static void stopServer() {
+    mockServiceHelper.stop();
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    mockServiceHelper.reset();
+    channelProvider = mockServiceHelper.createChannelProvider();
+    MetastorePartitionServiceSettings settings =
+        MetastorePartitionServiceSettings.newBuilder()
+            .setTransportChannelProvider(channelProvider)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .build();
+    client = MetastorePartitionServiceClient.create(settings);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    client.close();
+  }
+
+  @Test
+  public void batchCreateMetastorePartitionsTest() throws Exception {
+    BatchCreateMetastorePartitionsResponse expectedResponse =
+        BatchCreateMetastorePartitionsResponse.newBuilder()
+            .addAllPartitions(new ArrayList<MetastorePartition>())
+            .build();
+    mockMetastorePartitionService.addResponse(expectedResponse);
+
+    BatchCreateMetastorePartitionsRequest request =
+        BatchCreateMetastorePartitionsRequest.newBuilder()
.setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setSkipExistingPartitions(true) + .setTraceId("traceId-1067401920") + .build(); + + BatchCreateMetastorePartitionsResponse actualResponse = + client.batchCreateMetastorePartitions(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCreateMetastorePartitionsRequest actualRequest = + ((BatchCreateMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getParent(), actualRequest.getParent()); + Assert.assertEquals(request.getRequestsList(), actualRequest.getRequestsList()); + Assert.assertEquals( + request.getSkipExistingPartitions(), actualRequest.getSkipExistingPartitions()); + Assert.assertEquals(request.getTraceId(), actualRequest.getTraceId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreateMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + BatchCreateMetastorePartitionsRequest request = + BatchCreateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setSkipExistingPartitions(true) + .setTraceId("traceId-1067401920") + .build(); + client.batchCreateMetastorePartitions(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
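+      // gax maps the mocked io.grpc INVALID_ARGUMENT status onto
+      // InvalidArgumentException, so landing in this catch block is the passing path.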
+ } + } + + @Test + public void batchDeleteMetastorePartitionsTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + BatchDeleteMetastorePartitionsRequest request = + BatchDeleteMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllPartitionValues(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + + client.batchDeleteMetastorePartitions(request); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchDeleteMetastorePartitionsRequest actualRequest = + ((BatchDeleteMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getParent(), actualRequest.getParent()); + Assert.assertEquals(request.getPartitionValuesList(), actualRequest.getPartitionValuesList()); + Assert.assertEquals(request.getTraceId(), actualRequest.getTraceId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchDeleteMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + BatchDeleteMetastorePartitionsRequest request = + BatchDeleteMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllPartitionValues(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + client.batchDeleteMetastorePartitions(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchUpdateMetastorePartitionsTest() throws Exception { + BatchUpdateMetastorePartitionsResponse expectedResponse = + BatchUpdateMetastorePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + BatchUpdateMetastorePartitionsRequest request = + BatchUpdateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + + BatchUpdateMetastorePartitionsResponse actualResponse = + client.batchUpdateMetastorePartitions(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchUpdateMetastorePartitionsRequest actualRequest = + ((BatchUpdateMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getParent(), actualRequest.getParent()); + Assert.assertEquals(request.getRequestsList(), actualRequest.getRequestsList()); + Assert.assertEquals(request.getTraceId(), actualRequest.getTraceId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchUpdateMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + BatchUpdateMetastorePartitionsRequest request = + BatchUpdateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + client.batchUpdateMetastorePartitions(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listMetastorePartitionsTest() throws Exception { + ListMetastorePartitionsResponse expectedResponse = + ListMetastorePartitionsResponse.newBuilder().build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + + ListMetastorePartitionsResponse actualResponse = client.listMetastorePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListMetastorePartitionsRequest actualRequest = + ((ListMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + client.listMetastorePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+    }
+  }
+
+  @Test
+  public void listMetastorePartitionsTest2() throws Exception {
+    ListMetastorePartitionsResponse expectedResponse =
+        ListMetastorePartitionsResponse.newBuilder().build();
+    mockMetastorePartitionService.addResponse(expectedResponse);
+
+    String parent = "parent-995424086";
+
+    ListMetastorePartitionsResponse actualResponse = client.listMetastorePartitions(parent);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockMetastorePartitionService.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ListMetastorePartitionsRequest actualRequest =
+        ((ListMetastorePartitionsRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent, actualRequest.getParent());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void listMetastorePartitionsExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockMetastorePartitionService.addException(exception);
+
+    try {
+      String parent = "parent-995424086";
+      client.listMetastorePartitions(parent);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void streamMetastorePartitionsTest() throws Exception {
+    StreamMetastorePartitionsResponse expectedResponse =
+        StreamMetastorePartitionsResponse.newBuilder()
+            .setTotalPartitionsStreamedCount(-1442980886)
+            .setTotalPartitionsInsertedCount(-1900870109)
+            .build();
+    mockMetastorePartitionService.addResponse(expectedResponse);
+    StreamMetastorePartitionsRequest request =
+        StreamMetastorePartitionsRequest.newBuilder()
+            .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+            .addAllMetastorePartitions(new ArrayList<MetastorePartition>())
+            .setSkipExistingPartitions(true)
+            .build();
+
+    MockStreamObserver<StreamMetastorePartitionsResponse> responseObserver =
+        new MockStreamObserver<>();
+
+    BidiStreamingCallable<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+        callable = client.streamMetastorePartitionsCallable();
+    ApiStreamObserver<StreamMetastorePartitionsRequest> requestObserver =
+        callable.bidiStreamingCall(responseObserver);
+
+    requestObserver.onNext(request);
+    requestObserver.onCompleted();
+
+    List<StreamMetastorePartitionsResponse> actualResponses = responseObserver.future().get();
+    Assert.assertEquals(1, actualResponses.size());
+    Assert.assertEquals(expectedResponse, actualResponses.get(0));
+  }
+
+  @Test
+  public void streamMetastorePartitionsExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockMetastorePartitionService.addException(exception);
+    StreamMetastorePartitionsRequest request =
+        StreamMetastorePartitionsRequest.newBuilder()
+            .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
+            .addAllMetastorePartitions(new ArrayList<MetastorePartition>())
+            .setSkipExistingPartitions(true)
+            .build();
+
+    MockStreamObserver<StreamMetastorePartitionsResponse> responseObserver =
+        new MockStreamObserver<>();
+
+    BidiStreamingCallable<StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+        callable = client.streamMetastorePartitionsCallable();
+    ApiStreamObserver<StreamMetastorePartitionsRequest> requestObserver =
+        callable.bidiStreamingCall(responseObserver);
+
+    requestObserver.onNext(request);
+
+    try {
+      List<StreamMetastorePartitionsResponse> actualResponses = responseObserver.future().get();
+      Assert.fail("No exception thrown");
+    } catch (ExecutionException e) {
+      Assert.assertTrue(e.getCause() instanceof InvalidArgumentException);
+      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
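+      // The bidi stream reports the server-side error through the response future,
+      // so it arrives wrapped in an ExecutionException with the gax exception as cause.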
Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java new file mode 100644 index 000000000000..788125659085 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1alpha; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockMetastorePartitionService implements MockGrpcService { + private final MockMetastorePartitionServiceImpl serviceImpl; + + public MockMetastorePartitionService() { + serviceImpl = new MockMetastorePartitionServiceImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java new file mode 100644 index 000000000000..d0e6ae8c8a9d --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java @@ -0,0 +1,190 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.bigquery.storage.v1alpha;
+
+import com.google.api.core.BetaApi;
+import com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceGrpc.MetastorePartitionServiceImplBase;
+import com.google.protobuf.AbstractMessage;
+import com.google.protobuf.Empty;
+import io.grpc.stub.StreamObserver;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import javax.annotation.Generated;
+
+@BetaApi
+@Generated("by gapic-generator-java")
+public class MockMetastorePartitionServiceImpl extends MetastorePartitionServiceImplBase {
+  private List<AbstractMessage> requests;
+  private Queue<Object> responses;
+
+  public MockMetastorePartitionServiceImpl() {
+    requests = new ArrayList<>();
+    responses = new LinkedList<>();
+  }
+
+  public List<AbstractMessage> getRequests() {
+    return requests;
+  }
+
+  public void addResponse(AbstractMessage response) {
+    responses.add(response);
+  }
+
+  public void setResponses(List<AbstractMessage> responses) {
+    this.responses = new LinkedList<Object>(responses);
+  }
+
+  public void addException(Exception exception) {
+    responses.add(exception);
+  }
+
+  public void reset() {
+    requests = new ArrayList<>();
+    responses = new LinkedList<>();
+  }
+
+  @Override
+  public void batchCreateMetastorePartitions(
+      BatchCreateMetastorePartitionsRequest request,
+      StreamObserver<BatchCreateMetastorePartitionsResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof BatchCreateMetastorePartitionsResponse) {
+      requests.add(request);
+      responseObserver.onNext(((BatchCreateMetastorePartitionsResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method BatchCreateMetastorePartitions,"
+                      + " expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  BatchCreateMetastorePartitionsResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void batchDeleteMetastorePartitions(
+      BatchDeleteMetastorePartitionsRequest request, StreamObserver<Empty> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Empty) {
+      requests.add(request);
+      responseObserver.onNext(((Empty) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method BatchDeleteMetastorePartitions,"
+                      + " expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Empty.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void batchUpdateMetastorePartitions(
+      BatchUpdateMetastorePartitionsRequest request,
+      StreamObserver<BatchUpdateMetastorePartitionsResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof BatchUpdateMetastorePartitionsResponse) {
+      requests.add(request);
+      responseObserver.onNext(((BatchUpdateMetastorePartitionsResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method BatchUpdateMetastorePartitions,"
+                      + " expected %s or %s",
+                  response == null ?
"null" : response.getClass().getName(), + BatchUpdateMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listMetastorePartitions( + ListMetastorePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListMetastorePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((ListMetastorePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListMetastorePartitions, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + ListMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver streamMetastorePartitions( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(StreamMetastorePartitionsRequest value) { + requests.add(value); + final Object response = responses.remove(); + if (response instanceof StreamMetastorePartitionsResponse) { + responseObserver.onNext(((StreamMetastorePartitionsResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method StreamMetastorePartitions," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + StreamMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java new file mode 100644 index 000000000000..420fc9d5c818 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java @@ -0,0 +1,376 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class MetastorePartitionServiceClientTest { + private static MockMetastorePartitionService mockMetastorePartitionService; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private MetastorePartitionServiceClient client; + + @BeforeClass + public static void startStaticServer() { + mockMetastorePartitionService = new MockMetastorePartitionService(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), + Arrays.asList(mockMetastorePartitionService)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + MetastorePartitionServiceSettings settings = + MetastorePartitionServiceSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = MetastorePartitionServiceClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void batchCreateMetastorePartitionsTest() throws Exception { + BatchCreateMetastorePartitionsResponse expectedResponse = + BatchCreateMetastorePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + BatchCreateMetastorePartitionsRequest request = + BatchCreateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setSkipExistingPartitions(true) + .setTraceId("traceId-1067401920") + .build(); + + BatchCreateMetastorePartitionsResponse actualResponse = + client.batchCreateMetastorePartitions(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCreateMetastorePartitionsRequest actualRequest = + ((BatchCreateMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getParent(), actualRequest.getParent()); + Assert.assertEquals(request.getRequestsList(), actualRequest.getRequestsList()); + 
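+    // The mock service records each request it receives, allowing field-by-field
+    // verification that the client forwarded exactly what the caller built.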
Assert.assertEquals( + request.getSkipExistingPartitions(), actualRequest.getSkipExistingPartitions()); + Assert.assertEquals(request.getTraceId(), actualRequest.getTraceId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreateMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + BatchCreateMetastorePartitionsRequest request = + BatchCreateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setSkipExistingPartitions(true) + .setTraceId("traceId-1067401920") + .build(); + client.batchCreateMetastorePartitions(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void batchDeleteMetastorePartitionsTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + BatchDeleteMetastorePartitionsRequest request = + BatchDeleteMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllPartitionValues(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + + client.batchDeleteMetastorePartitions(request); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchDeleteMetastorePartitionsRequest actualRequest = + ((BatchDeleteMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getParent(), actualRequest.getParent()); + Assert.assertEquals(request.getPartitionValuesList(), actualRequest.getPartitionValuesList()); + Assert.assertEquals(request.getTraceId(), actualRequest.getTraceId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchDeleteMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + BatchDeleteMetastorePartitionsRequest request = + BatchDeleteMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllPartitionValues(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + client.batchDeleteMetastorePartitions(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchUpdateMetastorePartitionsTest() throws Exception { + BatchUpdateMetastorePartitionsResponse expectedResponse = + BatchUpdateMetastorePartitionsResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + BatchUpdateMetastorePartitionsRequest request = + BatchUpdateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + + BatchUpdateMetastorePartitionsResponse actualResponse = + client.batchUpdateMetastorePartitions(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchUpdateMetastorePartitionsRequest actualRequest = + ((BatchUpdateMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getParent(), actualRequest.getParent()); + Assert.assertEquals(request.getRequestsList(), actualRequest.getRequestsList()); + Assert.assertEquals(request.getTraceId(), actualRequest.getTraceId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchUpdateMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + BatchUpdateMetastorePartitionsRequest request = + BatchUpdateMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllRequests(new ArrayList()) + .setTraceId("traceId-1067401920") + .build(); + client.batchUpdateMetastorePartitions(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listMetastorePartitionsTest() throws Exception { + ListMetastorePartitionsResponse expectedResponse = + ListMetastorePartitionsResponse.newBuilder().build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + + ListMetastorePartitionsResponse actualResponse = client.listMetastorePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListMetastorePartitionsRequest actualRequest = + ((ListMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + client.listMetastorePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listMetastorePartitionsTest2() throws Exception { + ListMetastorePartitionsResponse expectedResponse = + ListMetastorePartitionsResponse.newBuilder().build(); + mockMetastorePartitionService.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListMetastorePartitionsResponse actualResponse = client.listMetastorePartitions(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockMetastorePartitionService.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListMetastorePartitionsRequest actualRequest = + ((ListMetastorePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listMetastorePartitionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + + try { + String parent = "parent-995424086"; + client.listMetastorePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void streamMetastorePartitionsTest() throws Exception { + StreamMetastorePartitionsResponse expectedResponse = + StreamMetastorePartitionsResponse.newBuilder() + .setTotalPartitionsStreamedCount(-1442980886) + .setTotalPartitionsInsertedCount(-1900870109) + .build(); + mockMetastorePartitionService.addResponse(expectedResponse); + StreamMetastorePartitionsRequest request = + StreamMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllMetastorePartitions(new ArrayList()) + .setSkipExistingPartitions(true) + .build(); + + MockStreamObserver responseObserver = + new MockStreamObserver<>(); + + BidiStreamingCallable + callable = client.streamMetastorePartitionsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void streamMetastorePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockMetastorePartitionService.addException(exception); + StreamMetastorePartitionsRequest request = + StreamMetastorePartitionsRequest.newBuilder() + .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .addAllMetastorePartitions(new ArrayList()) + .setSkipExistingPartitions(true) + .build(); + + MockStreamObserver responseObserver = + new MockStreamObserver<>(); + + BidiStreamingCallable + callable = client.streamMetastorePartitionsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + 
Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java new file mode 100644 index 000000000000..ce8b9667a510 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockMetastorePartitionService implements MockGrpcService { + private final MockMetastorePartitionServiceImpl serviceImpl; + + public MockMetastorePartitionService() { + serviceImpl = new MockMetastorePartitionServiceImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java new file mode 100644 index 000000000000..c342ddfd1158 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java @@ -0,0 +1,190 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceGrpc.MetastorePartitionServiceImplBase; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockMetastorePartitionServiceImpl extends MetastorePartitionServiceImplBase { + private List requests; + private Queue responses; + + public MockMetastorePartitionServiceImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void batchCreateMetastorePartitions( + BatchCreateMetastorePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchCreateMetastorePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((BatchCreateMetastorePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCreateMetastorePartitions," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + BatchCreateMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchDeleteMetastorePartitions( + BatchDeleteMetastorePartitionsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchDeleteMetastorePartitions," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchUpdateMetastorePartitions( + BatchUpdateMetastorePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchUpdateMetastorePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((BatchUpdateMetastorePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchUpdateMetastorePartitions," + + " expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + BatchUpdateMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listMetastorePartitions( + ListMetastorePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListMetastorePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((ListMetastorePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListMetastorePartitions, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + ListMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver streamMetastorePartitions( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(StreamMetastorePartitionsRequest value) { + requests.add(value); + final Object response = responses.remove(); + if (response instanceof StreamMetastorePartitionsResponse) { + responseObserver.onNext(((StreamMetastorePartitionsResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method StreamMetastorePartitions," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + StreamMetastorePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java new file mode 100644 index 000000000000..cc018cedda55 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java @@ -0,0 +1,358 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.bigquery.storage.v1beta1;
+
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.api.gax.grpc.GaxGrpcProperties;
+import com.google.api.gax.grpc.testing.LocalChannelProvider;
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.api.gax.grpc.testing.MockServiceHelper;
+import com.google.api.gax.grpc.testing.MockStreamObserver;
+import com.google.api.gax.rpc.ApiClientHeaderProvider;
+import com.google.api.gax.rpc.InvalidArgumentException;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.api.gax.rpc.StatusCode;
+import com.google.protobuf.AbstractMessage;
+import com.google.protobuf.Empty;
+import com.google.protobuf.Timestamp;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import javax.annotation.Generated;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Generated("by gapic-generator-java")
+public class BaseBigQueryStorageClientTest {
+  private static MockBigQueryStorage mockBigQueryStorage;
+  private static MockServiceHelper mockServiceHelper;
+  private LocalChannelProvider channelProvider;
+  private BaseBigQueryStorageClient client;
+
+  @BeforeClass
+  public static void startStaticServer() {
+    mockBigQueryStorage = new MockBigQueryStorage();
+    mockServiceHelper =
+        new MockServiceHelper(
+            UUID.randomUUID().toString(), Arrays.<MockGrpcService>asList(mockBigQueryStorage));
+    mockServiceHelper.start();
+  }
+
+  @AfterClass
+  public static void stopServer() {
+    mockServiceHelper.stop();
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    mockServiceHelper.reset();
+    channelProvider = mockServiceHelper.createChannelProvider();
+    BaseBigQueryStorageSettings settings =
+        BaseBigQueryStorageSettings.newBuilder()
+            .setTransportChannelProvider(channelProvider)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .build();
+    client = BaseBigQueryStorageClient.create(settings);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    client.close();
+  }
+
+  @Test
+  public void createReadSessionTest() throws Exception {
+    Storage.ReadSession expectedResponse =
+        Storage.ReadSession.newBuilder()
+            .setName("name3373707")
+            .setExpireTime(Timestamp.newBuilder().build())
+            .addAllStreams(new ArrayList<Storage.Stream>())
+            .setTableReference(TableReferenceProto.TableReference.newBuilder().build())
+            .setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
+            .setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
+            .build();
+    mockBigQueryStorage.addResponse(expectedResponse);
+
+    TableReferenceProto.TableReference tableReference =
+        TableReferenceProto.TableReference.newBuilder().build();
+    ProjectName parent = ProjectName.of("[PROJECT]");
+    int requestedStreams = 1017221410;
+
+    Storage.ReadSession actualResponse =
+        client.createReadSession(tableReference, parent, requestedStreams);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockBigQueryStorage.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    Storage.CreateReadSessionRequest actualRequest =
+        ((Storage.CreateReadSessionRequest) actualRequests.get(0));
+
+    Assert.assertEquals(tableReference, actualRequest.getTableReference());
+    Assert.assertEquals(parent.toString(), actualRequest.getParent());
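+    // ProjectName.of("[PROJECT]") serializes to projects/[PROJECT], which is the
+    // string form the wire-level CreateReadSessionRequest carries in its parent field.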
Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + try { + TableReferenceProto.TableReference tableReference = + TableReferenceProto.TableReference.newBuilder().build(); + ProjectName parent = ProjectName.of("[PROJECT]"); + int requestedStreams = 1017221410; + client.createReadSession(tableReference, parent, requestedStreams); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createReadSessionTest2() throws Exception { + Storage.ReadSession expectedResponse = + Storage.ReadSession.newBuilder() + .setName("name3373707") + .setExpireTime(Timestamp.newBuilder().build()) + .addAllStreams(new ArrayList()) + .setTableReference(TableReferenceProto.TableReference.newBuilder().build()) + .setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build()) + .setShardingStrategy(Storage.ShardingStrategy.forNumber(0)) + .build(); + mockBigQueryStorage.addResponse(expectedResponse); + + TableReferenceProto.TableReference tableReference = + TableReferenceProto.TableReference.newBuilder().build(); + String parent = "parent-995424086"; + int requestedStreams = 1017221410; + + Storage.ReadSession actualResponse = + client.createReadSession(tableReference, parent, requestedStreams); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + Storage.CreateReadSessionRequest actualRequest = + ((Storage.CreateReadSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(tableReference, actualRequest.getTableReference()); + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createReadSessionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + try { + TableReferenceProto.TableReference tableReference = + TableReferenceProto.TableReference.newBuilder().build(); + String parent = "parent-995424086"; + int requestedStreams = 1017221410; + client.createReadSession(tableReference, parent, requestedStreams); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void readRowsTest() throws Exception { + Storage.ReadRowsResponse expectedResponse = + Storage.ReadRowsResponse.newBuilder() + .setRowCount(1340416618) + .setStatus(Storage.StreamStatus.newBuilder().build()) + .setThrottleStatus(Storage.ThrottleStatus.newBuilder().build()) + .build(); + mockBigQueryStorage.addResponse(expectedResponse); + Storage.ReadRowsRequest request = + Storage.ReadRowsRequest.newBuilder() + .setReadPosition(Storage.StreamPosition.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + Storage.ReadRowsRequest request = + Storage.ReadRowsRequest.newBuilder() + .setReadPosition(Storage.StreamPosition.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void batchCreateReadSessionStreamsTest() throws Exception { + Storage.BatchCreateReadSessionStreamsResponse expectedResponse = + Storage.BatchCreateReadSessionStreamsResponse.newBuilder() + .addAllStreams(new ArrayList()) + .build(); + mockBigQueryStorage.addResponse(expectedResponse); + + Storage.ReadSession session = Storage.ReadSession.newBuilder().build(); + int requestedStreams = 1017221410; + + Storage.BatchCreateReadSessionStreamsResponse actualResponse = + client.batchCreateReadSessionStreams(session, requestedStreams); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + Storage.BatchCreateReadSessionStreamsRequest actualRequest = + ((Storage.BatchCreateReadSessionStreamsRequest) actualRequests.get(0)); + + Assert.assertEquals(session, actualRequest.getSession()); + Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreateReadSessionStreamsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + try { + Storage.ReadSession session = Storage.ReadSession.newBuilder().build(); + int requestedStreams = 1017221410; + client.batchCreateReadSessionStreams(session, requestedStreams); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void finalizeStreamTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockBigQueryStorage.addResponse(expectedResponse); + + Storage.Stream stream = Storage.Stream.newBuilder().build(); + + client.finalizeStream(stream); + + List actualRequests = mockBigQueryStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + Storage.FinalizeStreamRequest actualRequest = + ((Storage.FinalizeStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(stream, actualRequest.getStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + try { + Storage.Stream stream = Storage.Stream.newBuilder().build(); + client.finalizeStream(stream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void splitReadStreamTest() throws Exception { + Storage.SplitReadStreamResponse expectedResponse = + Storage.SplitReadStreamResponse.newBuilder() + .setPrimaryStream(Storage.Stream.newBuilder().build()) + .setRemainderStream(Storage.Stream.newBuilder().build()) + .build(); + mockBigQueryStorage.addResponse(expectedResponse); + + Storage.Stream originalStream = Storage.Stream.newBuilder().build(); + + Storage.SplitReadStreamResponse actualResponse = client.splitReadStream(originalStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryStorage.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + Storage.SplitReadStreamRequest actualRequest = + ((Storage.SplitReadStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(originalStream, actualRequest.getOriginalStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void splitReadStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + try { + Storage.Stream originalStream = Storage.Stream.newBuilder().build(); + client.splitReadStream(originalStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java new file mode 100644 index 000000000000..614b3aa369b5 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java @@ -0,0 +1,450 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta1; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.InternalException; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ResourceExhaustedException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.Stream; +import com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition; +import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.protobuf.Parser; +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class BigQueryStorageClientTest { + private static MockBigQueryStorage mockBigQueryStorage; + private static MockServiceHelper serviceHelper; + private BigQueryStorageClient client; + private LocalChannelProvider channelProvider; + private int retryCount; + private Code lastRetryStatusCode; + + @BeforeAll + static void startStaticServer() { + mockBigQueryStorage = new 
MockBigQueryStorage(); + serviceHelper = + new MockServiceHelper("in-process-1", Arrays.asList(mockBigQueryStorage)); + serviceHelper.start(); + } + + @AfterAll + static void stopServer() { + serviceHelper.stop(); + } + + @BeforeEach + void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + retryCount = 0; + lastRetryStatusCode = Code.OK; + BigQueryStorageSettings settings = + BigQueryStorageSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setReadRowsRetryAttemptListener( + new BigQueryStorageSettings.RetryAttemptListener() { + @Override + public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) { + synchronized (this) { + retryCount += 1; + lastRetryStatusCode = prevStatus.getCode(); + } + } + }) + .build(); + client = BigQueryStorageClient.create(settings); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + } + + @Test + @SuppressWarnings("all") + void createReadSessionTest() { + String name = "name3373707"; + ReadSession expectedResponse = ReadSession.newBuilder().setName(name).build(); + mockBigQueryStorage.addResponse(expectedResponse); + + TableReference tableReference = TableReference.newBuilder().build(); + String parent = "parent-995424086"; + int requestedStreams = 1017221410; + + ReadSession actualResponse = client.createReadSession(tableReference, parent, requestedStreams); + assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryStorage.getRequests(); + assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); + + assertEquals(tableReference, actualRequest.getTableReference()); + assertEquals(parent, actualRequest.getParent()); + assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + TableReference tableReference = TableReference.newBuilder().build(); + String parent = "parent-995424086"; + int requestedStreams = 1017221410; + + assertThrows( + InvalidArgumentException.class, + () -> client.createReadSession(tableReference, parent, requestedStreams)); + } + + @Test + @SuppressWarnings("all") + void readRowsTest() throws Exception { + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().build(); + mockBigQueryStorage.addResponse(expectedResponse); + StreamPosition readPosition = StreamPosition.newBuilder().build(); + ReadRowsRequest request = ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + assertEquals(expectedResponse, actualResponses.get(0)); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); + } + + @Test + @SuppressWarnings("all") + void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new 
StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + StreamPosition readPosition = StreamPosition.newBuilder().build(); + ReadRowsRequest request = ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); + } + + @Test + @SuppressWarnings("all") + void batchCreateReadSessionStreamsTest() { + BatchCreateReadSessionStreamsResponse expectedResponse = + BatchCreateReadSessionStreamsResponse.newBuilder().build(); + mockBigQueryStorage.addResponse(expectedResponse); + + ReadSession session = ReadSession.newBuilder().build(); + int requestedStreams = 1017221410; + + BatchCreateReadSessionStreamsResponse actualResponse = + client.batchCreateReadSessionStreams(session, requestedStreams); + assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryStorage.getRequests(); + assertEquals(1, actualRequests.size()); + BatchCreateReadSessionStreamsRequest actualRequest = + (BatchCreateReadSessionStreamsRequest) actualRequests.get(0); + + assertEquals(session, actualRequest.getSession()); + assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + void batchCreateReadSessionStreamsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + ReadSession session = ReadSession.newBuilder().build(); + int requestedStreams = 1017221410; + + assertThrows( + InvalidArgumentException.class, + () -> client.batchCreateReadSessionStreams(session, requestedStreams)); + } + + @Test + @SuppressWarnings("all") + void finalizeStreamTest() { + Empty expectedResponse = Empty.newBuilder().build(); + mockBigQueryStorage.addResponse(expectedResponse); + + Stream stream = Stream.newBuilder().build(); + + client.finalizeStream(stream); + + List actualRequests = mockBigQueryStorage.getRequests(); + assertEquals(1, actualRequests.size()); + FinalizeStreamRequest actualRequest = (FinalizeStreamRequest) actualRequests.get(0); + + assertEquals(stream, actualRequest.getStream()); + assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + void finalizeStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + Stream stream = Stream.newBuilder().build(); + + assertThrows(InvalidArgumentException.class, () -> client.finalizeStream(stream)); + } + + @Test + @SuppressWarnings("all") + void splitReadStreamTest() { + SplitReadStreamResponse expectedResponse = 
SplitReadStreamResponse.newBuilder().build(); + mockBigQueryStorage.addResponse(expectedResponse); + + Stream originalStream = Stream.newBuilder().build(); + + SplitReadStreamResponse actualResponse = client.splitReadStream(originalStream); + assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryStorage.getRequests(); + assertEquals(1, actualRequests.size()); + SplitReadStreamRequest actualRequest = (SplitReadStreamRequest) actualRequests.get(0); + + assertEquals(originalStream, actualRequest.getOriginalStream()); + assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + void splitReadStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryStorage.addException(exception); + + Stream originalStream = Stream.newBuilder().build(); + + assertThrows(InvalidArgumentException.class, () -> client.splitReadStream(originalStream)); + } + + @Test + @SuppressWarnings("all") + void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "Received unexpected EOS on DATA frame from server")), + GrpcStatusCode.of(Code.INTERNAL), + /* retryable= */ false); + mockBigQueryStorage.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryStorage.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); + } + + @Test + @SuppressWarnings("all") + void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "HTTP/2 error code: INTERNAL_ERROR\nReceived Rst Stream")), + GrpcStatusCode.of(Code.INTERNAL), + /* retryable= */ false); + mockBigQueryStorage.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryStorage.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); + } + + @Test + @SuppressWarnings("all") + void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() + throws ExecutionException, InterruptedException { + ApiException exception = + new ResourceExhaustedException( + new StatusRuntimeException( + Status.RESOURCE_EXHAUSTED.withDescription("You are out of quota X")), + 
+            GrpcStatusCode.of(Code.RESOURCE_EXHAUSTED),
+            /* retryable= */ false);
+    mockBigQueryStorage.addException(exception);
+    long rowCount = 1340416618L;
+    ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build();
+    mockBigQueryStorage.addResponse(expectedResponse);
+    ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
+
+    MockStreamObserver<ReadRowsResponse> responseObserver = new MockStreamObserver<>();
+
+    ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
+    callable.serverStreamingCall(request, responseObserver);
+
+    ExecutionException e =
+        assertThrows(ExecutionException.class, () -> responseObserver.future().get());
+    assertTrue(e.getCause() instanceof ResourceExhaustedException);
+    ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause();
+    assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode());
+
+    assertEquals(retryCount, 0);
+    assertEquals(lastRetryStatusCode, Code.OK);
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  void readRowsRetryForResourceExhaustedWithRetryInfo()
+      throws ExecutionException, InterruptedException {
+    RetryInfo retryInfo =
+        RetryInfo.newBuilder()
+            .setRetryDelay(Duration.newBuilder().setSeconds(123).setNanos(456).build())
+            .build();
+
+    Metadata metadata = new Metadata();
+    metadata.put(
+        Metadata.Key.of(
+            "google.rpc.retryinfo-bin",
+            new Metadata.BinaryMarshaller<RetryInfo>() {
+              @Override
+              public byte[] toBytes(RetryInfo value) {
+                return value.toByteArray();
+              }
+
+              @Override
+              public RetryInfo parseBytes(byte[] serialized) {
+                try {
+                  Parser<RetryInfo> parser = (RetryInfo.newBuilder().build()).getParserForType();
+                  return parser.parseFrom(serialized);
+                } catch (Exception e) {
+                  return null;
+                }
+              }
+            }),
+        retryInfo);
+
+    ApiException exception =
+        new ResourceExhaustedException(
+            new StatusRuntimeException(
+                Status.RESOURCE_EXHAUSTED.withDescription("Try again in a bit"), metadata),
+            GrpcStatusCode.of(Code.RESOURCE_EXHAUSTED),
+            /* retryable= */ false);
+    mockBigQueryStorage.addException(exception);
+    long rowCount = 1340416618L;
+    ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build();
+    mockBigQueryStorage.addResponse(expectedResponse);
+    ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
+
+    MockStreamObserver<ReadRowsResponse> responseObserver = new MockStreamObserver<>();
+
+    ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
+    callable.serverStreamingCall(request, responseObserver);
+    List<ReadRowsResponse> actualResponses = responseObserver.future().get();
+    assertEquals(1, actualResponses.size());
+
+    assertEquals(retryCount, 1);
+    assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java
new file mode 100644
index 000000000000..f33d9b5b853e
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1beta1;
+
+import com.google.api.core.BetaApi;
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.protobuf.AbstractMessage;
+import io.grpc.ServerServiceDefinition;
+import java.util.List;
+import javax.annotation.Generated;
+
+@BetaApi
+@Generated("by gapic-generator-java")
+public class MockBigQueryStorage implements MockGrpcService {
+  private final MockBigQueryStorageImpl serviceImpl;
+
+  public MockBigQueryStorage() {
+    serviceImpl = new MockBigQueryStorageImpl();
+  }
+
+  @Override
+  public List<AbstractMessage> getRequests() {
+    return serviceImpl.getRequests();
+  }
+
+  @Override
+  public void addResponse(AbstractMessage response) {
+    serviceImpl.addResponse(response);
+  }
+
+  @Override
+  public void addException(Exception exception) {
+    serviceImpl.addException(exception);
+  }
+
+  @Override
+  public ServerServiceDefinition getServiceDefinition() {
+    return serviceImpl.bindService();
+  }
+
+  @Override
+  public void reset() {
+    serviceImpl.reset();
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
new file mode 100644
index 000000000000..6ebe39bbfa20
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1beta1; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageGrpc.BigQueryStorageImplBase; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryStorageImpl extends BigQueryStorageImplBase { + private List requests; + private Queue responses; + + public MockBigQueryStorageImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createReadSession( + Storage.CreateReadSessionRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Storage.ReadSession) { + requests.add(request); + responseObserver.onNext(((Storage.ReadSession) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateReadSession, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Storage.ReadSession.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void readRows( + Storage.ReadRowsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Storage.ReadRowsResponse) { + requests.add(request); + responseObserver.onNext(((Storage.ReadRowsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ReadRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Storage.ReadRowsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchCreateReadSessionStreams( + Storage.BatchCreateReadSessionStreamsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Storage.BatchCreateReadSessionStreamsResponse) { + requests.add(request); + responseObserver.onNext(((Storage.BatchCreateReadSessionStreamsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected" + + " %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Storage.BatchCreateReadSessionStreamsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void finalizeStream( + Storage.FinalizeStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FinalizeStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void splitReadStream( + Storage.SplitReadStreamRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Storage.SplitReadStreamResponse) { + requests.add(request); + responseObserver.onNext(((Storage.SplitReadStreamResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SplitReadStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Storage.SplitReadStreamResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java new file mode 100644 index 000000000000..5776cdcd0ddc --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java @@ -0,0 +1,145 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta1.it; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.Stream; +import com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition; +import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference; +import com.google.protobuf.TextFormat; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +/** + * Integration tests for BigQuery Storage API which target long running sessions. These tests can be + * enabled by setting the system property 'bigquery.storage.enable_long_running_tests' to true. + */ +class ITBigQueryStorageLongRunningTest { + + private static final Logger LOG = + Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName()); + + private static final String LONG_TESTS_ENABLED_PROPERTY = + "bigquery.storage.enable_long_running_tests"; + + private static final String LONG_TESTS_DISABLED_MESSAGE = + String.format( + "BigQuery Storage long running tests are not enabled and will be skipped. " + + "To enable them, set system property '%s' to true.", + LONG_TESTS_ENABLED_PROPERTY); + + private static BigQueryStorageClient client; + private static String parentProjectId; + + @BeforeAll + static void beforeAll() throws IOException { + Assumptions.assumeTrue( + Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE); + client = BigQueryStorageClient.create(); + parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); + + LOG.info( + String.format( + "%s tests running with parent project: %s", + ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId)); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (client != null) { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + } + + @Test + void testLongRunningReadSession() throws InterruptedException, ExecutionException { + // This test reads a larger table with the goal of doing a simple validation of timeout settings + // for a longer running session. 
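+    // Each of the session's streams is read to completion on its own thread below,
+    // and the per-stream row counts are summed before the final assertion.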
+
+    TableReference tableReference =
+        TableReference.newBuilder()
+            .setProjectId("bigquery-public-data")
+            .setDatasetId("samples")
+            .setTableId("wikipedia")
+            .build();
+
+    ReadSession session =
+        client.createReadSession(
+            /* tableReference= */ tableReference,
+            /* parent= */ parentProjectId,
+            /* requestedStreams= */ 5);
+    assertEquals(
+        5,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table reference '%s' CreateReadSession"
+                + " response:%n%s",
+            TextFormat.printer().shortDebugString(tableReference), session.toString()));
+
+    List<Callable<Long>> tasks = new ArrayList<>(session.getStreamsCount());
+    for (final Stream stream : session.getStreamsList()) {
+      tasks.add(() -> readAllRowsFromStream(stream));
+    }
+
+    ExecutorService executor = Executors.newFixedThreadPool(tasks.size());
+    List<Future<Long>> results = executor.invokeAll(tasks);
+    executor.shutdown();
+
+    long rowCount = 0;
+    for (Future<Long> result : results) {
+      rowCount += result.get();
+    }
+
+    assertEquals(313_797_035, rowCount);
+  }
+
+  private long readAllRowsFromStream(Stream stream) {
+    StreamPosition readPosition = StreamPosition.newBuilder().setStream(stream).build();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadPosition(readPosition).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> serverStream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : serverStream) {
+      rowCount += response.getRowCount();
+    }
+
+    LOG.info(String.format("Read total of %d rows from stream '%s'.", rowCount, stream.getName()));
+    return rowCount;
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java
new file mode 100644
index 000000000000..7b375586b596
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java
@@ -0,0 +1,1334 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigquery.storage.v1beta1.it; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.UnauthenticatedException; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.RetryOption; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Field.Mode; +import com.google.cloud.bigquery.Job; +import com.google.cloud.bigquery.JobInfo; +import com.google.cloud.bigquery.JobInfo.WriteDisposition; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.TimePartitioning; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageSettings; +import com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.Stream; +import com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition; +import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers; +import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference; +import com.google.cloud.bigquery.storage.v1beta1.it.SimpleRowReader.AvroRowConsumer; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.common.base.Preconditions; +import com.google.protobuf.TextFormat; +import com.google.protobuf.Timestamp; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.apache.avro.Conversions; +import org.apache.avro.LogicalTypes; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericRecordBuilder; +import 
org.apache.avro.util.Utf8; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +/** Integration tests for BigQuery Storage API. */ +class ITBigQueryStorageTest { + + private static final Logger LOG = Logger.getLogger(ITBigQueryStorageTest.class.getName()); + private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); + private static final String DESCRIPTION = "BigQuery Storage Java client test dataset"; + + private static BigQueryStorageClient client; + private static String parentProjectId; + private static BigQuery bigquery; + + private static final String FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + "0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + "0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"project_id\": \"someprojectid\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\",\n" + + " \"universe_domain\": \"googleapis.com\"\n" + + "}"; + + private static final String FAKE_JSON_CRED_WITH_INVALID_DOMAIN = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + "0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + 
"knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + "0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"project_id\": \"someprojectid\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\",\n" + + " \"universe_domain\": \"fake.domain\"\n" + + "}"; + + @BeforeAll + static void beforeAll() throws IOException { + client = BigQueryStorageClient.create(); + parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); + + LOG.info( + String.format( + "%s tests running with parent project: %s", + ITBigQueryStorageTest.class.getSimpleName(), parentProjectId)); + + RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); + bigquery = bigqueryHelper.getOptions().getService(); + DatasetInfo datasetInfo = + DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build(); + bigquery.create(datasetInfo); + LOG.info("Created test dataset: " + DATASET); + } + + @AfterAll + static void afterAll() throws InterruptedException { + if (client != null) { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + LOG.info("Deleted test dataset: " + DATASET); + } + } + + @Test + void testSimpleRead() { + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + .setTableId("shakespeare") + .build(); + + ReadSession session = + client.createReadSession( + /* tableReference= */ tableReference, + /* parent= */ parentProjectId, + /* requestedStreams= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table reference '%s' CreateReadSession" + + " response:%n%s", + TextFormat.printer().shortDebugString(tableReference), session.toString())); + + StreamPosition readPosition = + 
StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + long rowCount = 0; + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + assertEquals(164_656, rowCount); + } + + @Test + void testSimpleReadArrow() { + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + .setTableId("shakespeare") + .build(); + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setRequestedStreams(1) + .setTableReference(tableReference) + .setFormat(DataFormat.ARROW) + .build(); + ReadSession session = client.createReadSession(request); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table reference '%s' CreateReadSession" + + " response:%n%s", + TextFormat.printer().shortDebugString(tableReference), session.toString())); + + StreamPosition readPosition = + StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + long rowCount = 0; + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + rowCount += response.getRowCount(); + } + + assertEquals(164_656, rowCount); + } + + @Test + void testRangeType() throws InterruptedException { + // Create table with Range values. + String tableName = "test_range_type" + UUID.randomUUID().toString().substring(0, 8); + QueryJobConfiguration createTable = + QueryJobConfiguration.newBuilder( + String.format( + "CREATE TABLE %s AS SELECT RANGE(DATE '2020-01-01', DATE '2020-12-31') as date," + + " \n" + + "RANGE(DATETIME '2020-01-01T12:00:00', DATETIME '2020-12-31T12:00:00') as" + + " datetime, \n" + + "RANGE(TIMESTAMP '2014-01-01 07:00:00.000000+00:00', TIMESTAMP" + + " '2015-01-01 07:00:00.000000+00:00') as timestamp", + tableName)) + .setDefaultDataset(DatasetId.of(DATASET)) + .setUseLegacySql(false) + .build(); + bigquery.query(createTable); + + TableReference tableReference = + TableReference.newBuilder() + .setProjectId(ServiceOptions.getDefaultProjectId()) + .setDatasetId(DATASET) + .setTableId(tableName) + .build(); + + CreateReadSessionRequest createReadSessionRequestrequest = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setRequestedStreams(1) + .setTableReference(tableReference) + .setFormat(DataFormat.ARROW) + .build(); + ReadSession session = client.createReadSession(createReadSessionRequestrequest); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table reference '%s' CreateReadSession" + + " response:%n%s", + TextFormat.printer().shortDebugString(tableReference), session.toString())); + + StreamPosition readPosition = + StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + long rowCount = 0; + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + rowCount 
+= response.getRowCount(); + } + + assertEquals(1, rowCount); + } + + @Test + void testSimpleReadAndResume() { + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + .setTableId("shakespeare") + .build(); + + ReadSession session = + client.createReadSession( + /* tableReference= */ tableReference, + /* parent= */ parentProjectId, + /* requestedStreams= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table reference '%s' CreateReadSession" + + " response:%n%s", + TextFormat.printer().shortDebugString(tableReference), session.toString())); + + // We have to read some number of rows in order to be able to resume. More details: + // https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1beta1#google.cloud.bigquery.storage.v1beta1.ReadRowsRequest + + long rowCount = ReadStreamToOffset(session.getStreams(0), /* rowOffset= */ 34_846); + + StreamPosition readPosition = + StreamPosition.newBuilder().setStream(session.getStreams(0)).setOffset(rowCount).build(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + // Verifies that the number of rows skipped and read equals to the total number of rows in the + // table. + assertEquals(164_656, rowCount); + } + + @Test + void testFilter() throws IOException { + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + .setTableId("shakespeare") + .build(); + + TableReadOptions options = + TableReadOptions.newBuilder().setRowRestriction("word_count > 100").build(); + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setRequestedStreams(1) + .setTableReference(tableReference) + .setReadOptions(options) + .setFormat(DataFormat.AVRO) + .build(); + + ReadSession session = client.createReadSession(request); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table reference '%s' CreateReadSession" + + " response:%n%s", + TextFormat.printer().shortDebugString(tableReference), session.toString())); + + StreamPosition readPosition = + StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + SimpleRowReader reader = + new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema())); + + long rowCount = 0; + + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + reader.processRows( + response.getAvroRows(), + new SimpleRowReader.AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + Long wordCount = (Long) record.get("word_count"); + assertWithMessage("Row not matching expectations: %s", record.toString()) + .that(wordCount) + .isGreaterThan(100L); + } + }); + } + + assertEquals(1_333, rowCount); + } + + @Test + void testColumnSelection() throws IOException { + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + 
.setTableId("shakespeare") + .build(); + + TableReadOptions options = + TableReadOptions.newBuilder() + .addSelectedFields("word") + .addSelectedFields("word_count") + .setRowRestriction("word_count > 100") + .build(); + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setRequestedStreams(1) + .setTableReference(tableReference) + .setReadOptions(options) + .setFormat(DataFormat.AVRO) + .build(); + + ReadSession session = client.createReadSession(request); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table reference '%s' CreateReadSession" + + " response:%n%s", + TextFormat.printer().shortDebugString(tableReference), session.toString())); + + StreamPosition readPosition = + StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadPosition(readPosition).build(); + + Schema avroSchema = new Schema.Parser().parse(session.getAvroSchema().getSchema()); + + String actualSchemaMessage = + String.format( + "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); + assertEquals( + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); + assertEquals( + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); + assertEquals( + Schema.Type.LONG, + avroSchema.getField("word_count").schema().getType(), + actualSchemaMessage); + + SimpleRowReader reader = new SimpleRowReader(avroSchema); + + long rowCount = 0; + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + reader.processRows( + response.getAvroRows(), + new SimpleRowReader.AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + String rowAssertMessage = + String.format("Row not matching expectations: %s", record.toString()); + + Long wordCount = (Long) record.get("word_count"); + assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L); + + Utf8 word = (Utf8) record.get("word"); + assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0); + } + }); + } + + assertEquals(1_333, rowCount); + } + + @Test + void testReadAtSnapshot() throws InterruptedException, IOException { + Field intFieldSchema = + Field.newBuilder("col", LegacySQLTypeName.INTEGER) + .setMode(Mode.REQUIRED) + .setDescription("IntegerDescription") + .build(); + com.google.cloud.bigquery.Schema tableSchema = + com.google.cloud.bigquery.Schema.of(intFieldSchema); + + TableId testTableId = TableId.of(/* dataset= */ DATASET, /* table= */ "test_read_snapshot"); + bigquery.create(TableInfo.of(testTableId, StandardTableDefinition.of(tableSchema))); + + TableReference tableReference = + TableReference.newBuilder() + .setTableId(testTableId.getTable()) + .setDatasetId(DATASET) + .setProjectId(ServiceOptions.getDefaultProjectId()) + .build(); + + Job firstJob = + RunQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ testTableId, /* query= */ "SELECT 1 AS col"); + + Job secondJob = + RunQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ testTableId, /* query= */ "SELECT 2 AS col"); + + 
final List rowsAfterFirstSnapshot = new ArrayList<>(); + ProcessRowsAtSnapshot( + /* tableReference= */ tableReference, + /* snapshotInMillis= */ firstJob.getStatistics().getEndTime(), + /* filter= */ null, + /* consumer= */ new AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + rowsAfterFirstSnapshot.add((Long) record.get("col")); + } + }); + assertEquals(Arrays.asList(1L), rowsAfterFirstSnapshot); + + final List rowsAfterSecondSnapshot = new ArrayList<>(); + ProcessRowsAtSnapshot( + /* tableReference= */ tableReference, + /* snapshotInMillis= */ secondJob.getStatistics().getEndTime(), + /* filter= */ null, + /* consumer= */ new AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + rowsAfterSecondSnapshot.add((Long) record.get("col")); + } + }); + Collections.sort(rowsAfterSecondSnapshot); + assertEquals(Arrays.asList(1L, 2L), rowsAfterSecondSnapshot); + } + + @Test + void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { + String partitionedTableName = + "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s (num_field INT64, date_field DATE) " + + " PARTITION BY date_field " + + " OPTIONS( " + + " description=\"a table partitioned by date_field\" " + + " ) " + + "AS " + + " SELECT 1, CAST(\"2019-01-01\" AS DATE)" + + " UNION ALL" + + " SELECT 2, CAST(\"2019-01-02\" AS DATE)" + + " UNION ALL" + + " SELECT 3, CAST(\"2019-01-03\" AS DATE)", + DATASET, partitionedTableName); + + RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + TableReference tableReference = + TableReference.newBuilder() + .setTableId(partitionedTableName) + .setDatasetId(DATASET) + .setProjectId(ServiceOptions.getDefaultProjectId()) + .build(); + + List unfilteredRows = + ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); + assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); + + List partitionFilteredRows = + ReadAllRows( + /* tableReference= */ tableReference, + /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)"); + assertEquals( + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); + assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); + } + + @Test + void testIngestionTimePartitionedTable() throws InterruptedException, IOException { + Field intFieldSchema = + Field.newBuilder("num_field", LegacySQLTypeName.INTEGER) + .setMode(Mode.REQUIRED) + .setDescription("IntegerDescription") + .build(); + com.google.cloud.bigquery.Schema tableSchema = + com.google.cloud.bigquery.Schema.of(intFieldSchema); + + TableId testTableId = + TableId.of(/* dataset= */ DATASET, /* table= */ "test_date_partitioned_table"); + bigquery.create( + TableInfo.of( + testTableId, + StandardTableDefinition.newBuilder() + .setTimePartitioning(TimePartitioning.of(TimePartitioning.Type.DAY)) + .setSchema(tableSchema) + .build())); + + // Simulate ingestion for 2019-01-01. + RunQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ TableId.of( + /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190101"), + /* query= */ "SELECT 1 AS num_field"); + + // Simulate ingestion for 2019-01-02. 
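+    // (The "$20190102" suffix is a partition decorator, so this append targets
+    // that day's partition of the ingestion-time partitioned table directly.)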
+ RunQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ TableId.of( + /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190102"), + /* query= */ "SELECT 2 AS num_field"); + + TableReference tableReference = + TableReference.newBuilder() + .setTableId(testTableId.getTable()) + .setDatasetId(testTableId.getDataset()) + .setProjectId(ServiceOptions.getDefaultProjectId()) + .build(); + + List unfilteredRows = + ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); + assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); + + List partitionFilteredRows = + ReadAllRows( + /* tableReference= */ tableReference, /* filter= */ "_PARTITIONDATE > \"2019-01-01\""); + assertEquals( + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); + assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); + } + + @Test + void testBasicSqlTypes() throws InterruptedException, IOException { + String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s " + + " (int_field INT64 NOT NULL," + + " num_field NUMERIC NOT NULL," + + " float_field FLOAT64 NOT NULL," + + " bool_field BOOL NOT NULL," + + " str_field STRING NOT NULL," + + " bytes_field BYTES NOT NULL) " + + " OPTIONS( " + + " description=\"a table with basic column types\" " + + " ) " + + "AS " + + " SELECT " + + " 17," + + " CAST(1234.56 AS NUMERIC)," + + " 6.547678," + + " TRUE," + + " \"String field value\"," + + " b\"абвгд\"", + DATASET, tableName); + + RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + TableReference tableReference = + TableReference.newBuilder() + .setTableId(tableName) + .setDatasetId(DATASET) + .setProjectId(ServiceOptions.getDefaultProjectId()) + .build(); + + List rows = + ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); + + GenericData.Record record = rows.get(0); + Schema avroSchema = record.getSchema(); + + String actualSchemaMessage = + String.format( + "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); + + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage); + + assertEquals( + Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage); + assertEquals(17L, (long) record.get("int_field"), rowAssertMessage); + + assertEquals( + Schema.Type.BYTES, + avroSchema.getField("num_field").schema().getType(), + actualSchemaMessage); + assertEquals( + LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9), + avroSchema.getField("num_field").schema().getLogicalType(), + actualSchemaMessage); + BigDecimal actual_num_field = + new Conversions.DecimalConversion() + .fromBytes( + (ByteBuffer) record.get("num_field"), + avroSchema, + avroSchema.getField("num_field").schema().getLogicalType()); + assertEquals( + BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9), + actual_num_field, + rowAssertMessage); + + assertEquals( + Schema.Type.DOUBLE, + avroSchema.getField("float_field").schema().getType(), + actualSchemaMessage); + assertEquals( + /* expected= */ 6.547678d, + /* actual= */ (double) record.get("float_field"), + /* delta= */ 0.0001, + rowAssertMessage); + + assertEquals( + Schema.Type.BOOLEAN, + avroSchema.getField("bool_field").schema().getType(), + actualSchemaMessage); + assertEquals(true, record.get("bool_field"), rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + avroSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage); + + assertEquals( + Schema.Type.BYTES, + avroSchema.getField("bytes_field").schema().getType(), + actualSchemaMessage); + assertArrayEquals( + Utf8.getBytesFor("абвгд"), + ((ByteBuffer) (record.get("bytes_field"))).array(), + rowAssertMessage); + } + + @Test + void testDateAndTimeSqlTypes() throws InterruptedException, IOException { + String tableName = + "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s " + + " (date_field DATE NOT NULL," + + " datetime_field DATETIME NOT NULL," + + " time_field TIME NOT NULL," + + " timestamp_field TIMESTAMP NOT NULL)" + + " OPTIONS( " + + " description=\"a table with date and time column types\" " + + " ) " + + "AS " + + " SELECT " + + " CAST(\"2019-05-31\" AS DATE)," + + " CAST(\"2019-04-30 21:47:59.999999\" AS DATETIME)," + + " CAST(\"21:47:59.999999\" AS TIME)," + + " CAST(\"2019-04-30 19:24:19.123456 UTC\" AS TIMESTAMP)", + DATASET, tableName); + + RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + TableReference tableReference = + TableReference.newBuilder() + .setTableId(tableName) + .setDatasetId(DATASET) + .setProjectId(ServiceOptions.getDefaultProjectId()) + .build(); + + List rows = + ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); + + GenericData.Record record = rows.get(0); + Schema avroSchema = record.getSchema(); + + String actualSchemaMessage = + String.format( + "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); + + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage); + + assertEquals( + Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage); + assertEquals( + LogicalTypes.date(), + avroSchema.getField("date_field").schema().getLogicalType(), + actualSchemaMessage); + assertEquals( + LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31), + LocalDate.ofEpochDay((int) record.get("date_field")), + rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + avroSchema.getField("datetime_field").schema().getType(), + actualSchemaMessage); + assertEquals( + "datetime", + avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"), + actualSchemaMessage); + assertEquals( + new Utf8("2019-04-30T21:47:59.999999"), + (Utf8) record.get("datetime_field"), + rowAssertMessage); + + assertEquals( + Schema.Type.LONG, + avroSchema.getField("time_field").schema().getType(), + actualSchemaMessage); + assertEquals( + LogicalTypes.timeMicros(), + avroSchema.getField("time_field").schema().getLogicalType(), + actualSchemaMessage); + assertEquals( + LocalTime.of( + /* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000), + LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")), + rowAssertMessage); + + assertEquals( + Schema.Type.LONG, + avroSchema.getField("timestamp_field").schema().getType(), + actualSchemaMessage); + assertEquals( + LogicalTypes.timestampMicros(), + avroSchema.getField("timestamp_field").schema().getLogicalType(), + actualSchemaMessage); + ZonedDateTime expected_timestamp = + ZonedDateTime.parse( + "2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC)) + .withNano(123_456_000); + long actual_timestamp_micros = (long) record.get("timestamp_field"); + ZonedDateTime actual_timestamp = + ZonedDateTime.ofInstant( + Instant.ofEpochSecond( + /* epochSecond= */ actual_timestamp_micros / 1_000_000, + (actual_timestamp_micros % 1_000_000) * 1_000), + ZoneOffset.UTC); + assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage); + } + + @Test + void testGeographySqlType() throws InterruptedException, IOException { + String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8); + String createTableStatement = + String.format( + " CREATE TABLE %s.%s " + + " (geo_field GEOGRAPHY NOT NULL)" + + " OPTIONS( " + + " description=\"a table with a geography column type\" " + + " ) " + + "AS " + + " SELECT ST_GEOGPOINT(1.1, 2.2)", + DATASET, tableName); + + RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + + TableReference tableReference = + TableReference.newBuilder() + .setTableId(tableName) + .setDatasetId(DATASET) + .setProjectId(ServiceOptions.getDefaultProjectId()) + .build(); + + List rows = + ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); + + GenericData.Record record = rows.get(0); + Schema avroSchema = record.getSchema(); + + String actualSchemaMessage = + String.format( + "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.STRING,
+        avroSchema.getField("geo_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        "GEOGRAPHY",
+        avroSchema.getField("geo_field").schema().getObjectProp("sqlType"),
+        actualSchemaMessage);
+    assertEquals(new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"), rowAssertMessage);
+  }
+
+  @Test
+  void testStructAndArraySqlTypes() throws InterruptedException, IOException {
+    String tableName =
+        "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s (array_field ARRAY<INT64>, struct_field STRUCT<int_field INT64,"
+                + " str_field STRING> NOT NULL) OPTIONS( description=\"a table with array and"
+                + " struct column types\" ) AS SELECT [1, 2, 3], "
+                + " (10, 'abc')",
+            DATASET, tableName);
+
+    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    TableReference tableReference =
+        TableReference.newBuilder()
+            .setTableId(tableName)
+            .setDatasetId(DATASET)
+            .setProjectId(ServiceOptions.getDefaultProjectId())
+            .build();
+
+    List<GenericData.Record> rows =
+        ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.ARRAY,
+        avroSchema.getField("array_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        Schema.Type.LONG,
+        avroSchema.getField("array_field").schema().getElementType().getType(),
+        actualSchemaMessage);
+    assertArrayEquals(
+        new Long[] {1L, 2L, 3L},
+        ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]),
+        rowAssertMessage);
+
+    // Validate the STRUCT field and its members.
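+    // (The read API surfaces a BigQuery STRUCT as a nested Avro RECORD and an ARRAY as an Avro
+    // ARRAY of the corresponding element type, so the member assertions below mirror the
+    // top-level schema assertions above.)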
+ Schema structSchema = avroSchema.getField("struct_field").schema(); + assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage); + GenericData.Record structRecord = (GenericData.Record) record.get("struct_field"); + + assertEquals( + Schema.Type.LONG, + structSchema.getField("int_field").schema().getType(), + actualSchemaMessage); + assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + structSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage); + } + + @Test + void testUniverseDomainWithInvalidUniverseDomain() throws IOException { + BigQueryStorageSettings bigQueryStorageSettings = + BigQueryStorageSettings.newBuilder() + .setCredentialsProvider( + FixedCredentialsProvider.create(loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN))) + .setUniverseDomain("invalid.domain") + .build(); + + BigQueryStorageClient localClient = BigQueryStorageClient.create(bigQueryStorageSettings); + + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + .setTableId("shakespeare") + .build(); + + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* tableReference= */ tableReference, + /* parent= */ parentProjectId, + /* requestedStreams= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); + localClient.close(); + } + + @Test + void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { + BigQueryStorageSettings bigQueryStorageSettings = + BigQueryStorageSettings.newBuilder() + .setCredentialsProvider( + FixedCredentialsProvider.create( + loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN))) + .setUniverseDomain("invalid.domain") + .build(); + + BigQueryStorageClient localClient = BigQueryStorageClient.create(bigQueryStorageSettings); + + TableReference tableReference = + TableReference.newBuilder() + .setProjectId("bigquery-public-data") + .setDatasetId("samples") + .setTableId("shakespeare") + .build(); + + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* tableReference= */ tableReference, + /* parent= */ parentProjectId, + /* requestedStreams= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); + localClient.close(); + } + + @Test + void testUniverseDomainWithMatchingDomain() throws IOException { + // Test a valid domain using the default credentials and Google default universe domain. 
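+    // (The two tests above show the failure mode: a client whose configured universe domain does
+    // not match its credentials fails with UNAUTHENTICATED. Here both sides resolve to
+    // googleapis.com, so creating a read session is expected to succeed.)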
+    BigQueryStorageSettings bigQueryStorageSettings =
+        BigQueryStorageSettings.newBuilder().setUniverseDomain("googleapis.com").build();
+    BigQueryStorageClient localClient = BigQueryStorageClient.create(bigQueryStorageSettings);
+
+    TableReference tableReference =
+        TableReference.newBuilder()
+            .setProjectId("bigquery-public-data")
+            .setDatasetId("samples")
+            .setTableId("shakespeare")
+            .build();
+
+    ReadSession session =
+        localClient.createReadSession(
+            /* tableReference= */ tableReference,
+            /* parent= */ parentProjectId,
+            /* requestedStreams= */ 1);
+
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table reference '%s' CreateReadSession"
+                + " response:%n%s",
+            TextFormat.printer().shortDebugString(tableReference), session.toString()));
+
+    StreamPosition readPosition =
+        StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadPosition(readPosition).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> stream = localClient.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      rowCount += response.getRowCount();
+    }
+
+    assertEquals(164_656, rowCount);
+    localClient.close();
+  }
+
+  void testUniverseDomain() throws IOException {
+    // This test is not yet part of the presubmit integration tests, as it requires the
+    // apis-tpclp.goog universe domain credentials.
+    // Test a valid domain using the default credentials and Google default universe domain.
+    BigQueryStorageSettings bigQueryStorageSettings =
+        BigQueryStorageSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build();
+    BigQueryStorageClient localClient = BigQueryStorageClient.create(bigQueryStorageSettings);
+
+    TableReference tableReference =
+        TableReference.newBuilder()
+            .setProjectId("google-tpc-testing-environment:cloudsdk-test-project")
+            .setDatasetId("tpc_demo_dataset")
+            .setTableId("new_table")
+            .build();
+
+    ReadSession session =
+        localClient.createReadSession(
+            /* tableReference= */ tableReference,
+            /* parent= */ parentProjectId,
+            /* requestedStreams= */ 1);
+
+    StreamPosition readPosition =
+        StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadPosition(readPosition).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> stream = localClient.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      rowCount += response.getRowCount();
+    }
+
+    assertEquals(1, rowCount);
+    localClient.close();
+  }
+
+  /**
+   * Reads to the specified row offset within the stream. If the stream does not have the desired
+   * rows to read, it will read all of them.
+   *
+   * @param stream the stream to read from
+   * @param rowOffset the row offset to read up to
+   * @return the number of requested rows to skip, or the total rows read if the stream had fewer
+   *     rows.
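+   *     (Effectively {@code min(rowOffset, totalRows)}, where totalRows denotes the number of
+   *     rows the stream actually contains.)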
+   */
+  private long ReadStreamToOffset(Stream stream, long rowOffset) {
+    StreamPosition readPosition = StreamPosition.newBuilder().setStream(stream).build();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadPosition(readPosition).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> serverStream = client.readRowsCallable().call(readRowsRequest);
+    Iterator<ReadRowsResponse> responseIterator = serverStream.iterator();
+
+    while (responseIterator.hasNext()) {
+      ReadRowsResponse response = responseIterator.next();
+      rowCount += response.getRowCount();
+      if (rowCount >= rowOffset) {
+        return rowOffset;
+      }
+    }
+
+    return rowCount;
+  }
+
+  /**
+   * Reads all the rows from the specified tableReference.
+   *
+   * <p>For every row, the consumer is called for processing.
+   *
+   * @param tableReference the table to read from
+   * @param snapshotInMillis Optional. If specified, only rows up to this snapshot timestamp are
+   *     returned.
+   * @param filter Optional. If specified, it will be used to restrict returned data.
+   * @param consumer the consumer that receives all Avro rows.
+   * @throws IOException
+   */
+  private void ProcessRowsAtSnapshot(
+      TableReference tableReference, Long snapshotInMillis, String filter, AvroRowConsumer consumer)
+      throws IOException {
+    Preconditions.checkNotNull(tableReference);
+    Preconditions.checkNotNull(consumer);
+
+    CreateReadSessionRequest.Builder createSessionRequestBuilder =
+        CreateReadSessionRequest.newBuilder()
+            .setParent(parentProjectId)
+            .setRequestedStreams(1)
+            .setTableReference(tableReference)
+            .setFormat(DataFormat.AVRO);
+
+    if (snapshotInMillis != null) {
+      Timestamp snapshotTimestamp =
+          Timestamp.newBuilder()
+              .setSeconds(snapshotInMillis / 1_000)
+              .setNanos((int) ((snapshotInMillis % 1_000) * 1_000_000))
+              .build();
+      createSessionRequestBuilder.setTableModifiers(
+          TableModifiers.newBuilder().setSnapshotTime(snapshotTimestamp).build());
+    }
+
+    if (filter != null && !filter.isEmpty()) {
+      createSessionRequestBuilder.setReadOptions(
+          TableReadOptions.newBuilder().setRowRestriction(filter).build());
+    }
+
+    ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table reference '%s' CreateReadSession"
+                + " response:%n%s",
+            TextFormat.printer().shortDebugString(tableReference), session.toString()));
+
+    StreamPosition readPosition =
+        StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadPosition(readPosition).build();
+
+    SimpleRowReader reader =
+        new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      reader.processRows(response.getAvroRows(), consumer);
+    }
+  }
+
+  /**
+   * Reads all the rows from the specified table reference and returns a list as generic Avro
+   * records.
+   *
+   * @param tableReference the table to read from
+   * @param filter Optional. If specified, it will be used to restrict returned data.
+   * @return the rows read, as generic Avro records
+   */
+  List<GenericData.Record> ReadAllRows(TableReference tableReference, String filter)
+      throws IOException {
+    final List<GenericData.Record> rows = new ArrayList<>();
+    ProcessRowsAtSnapshot(
+        /* tableReference= */ tableReference,
+        /* snapshotInMillis= */ null,
+        /* filter= */ filter,
+        new AvroRowConsumer() {
+          @Override
+          public void accept(GenericData.Record record) {
+            // Clone the record since that reference will be reused by the reader.
+            rows.add(new GenericRecordBuilder(record).build());
+          }
+        });
+    return rows;
+  }
+
+  /**
+   * Runs a query job with WRITE_APPEND disposition to the destination table and returns the
+   * successfully completed job.
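+   *
+   * <p>For example (a usage sketch; the dataset, table, and query are illustrative), appending a
+   * single row to the 2019-01-01 partition of an ingestion-time partitioned table:
+   *
+   * <pre>{@code
+   * Job job =
+   *     RunQueryAppendJobAndExpectSuccess(
+   *         TableId.of("my_dataset", "my_table$20190101"), "SELECT 1 AS num_field");
+   * }</pre>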
+ * + * @param destinationTableId + * @param query + * @return + * @throws InterruptedException + */ + private Job RunQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query) + throws InterruptedException { + return RunQueryJobAndExpectSuccess( + QueryJobConfiguration.newBuilder(query) + .setDestinationTable(destinationTableId) + .setUseQueryCache(false) + .setUseLegacySql(false) + .setWriteDisposition(WriteDisposition.WRITE_APPEND) + .build()); + } + + /** + * Runs a query job with provided configuration and returns the successfully completed job. + * + * @param configuration + * @return + * @throws InterruptedException + */ + private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration) + throws InterruptedException { + Job job = bigquery.create(JobInfo.of(configuration)); + Job completedJob = + job.waitFor( + RetryOption.initialRetryDelayDuration(Duration.ofSeconds(1)), + RetryOption.totalTimeoutDuration(Duration.ofMinutes(1))); + + assertNotNull(completedJob); + assertNull( + /* object= */ completedJob.getStatus().getError(), + /* message= */ "Received a job status that is not a success: " + + completedJob.getStatus().toString()); + + return completedJob; + } + + static ServiceAccountCredentials loadCredentials(String credentialFile) { + try (InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes())) { + return ServiceAccountCredentials.fromStream(keyStream); + } catch (IOException e) { + fail("Couldn't create fake JSON credentials."); + } + return null; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java new file mode 100644 index 000000000000..0e2cb7d7b6cd --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java @@ -0,0 +1,77 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta1.it; + +import com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows; +import com.google.common.base.Preconditions; +import java.io.IOException; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DecoderFactory; + +/* + * SimpleRowReader handles deserialization of the Avro-encoded row blocks transmitted + * from the storage API using a generic datum decoder. + */ +public class SimpleRowReader { + + public interface AvroRowConsumer { + + /** + * Handler for every new Avro row that is read. + * + * @param record is Avro generic record structure. Consumers should not rely on the reference + * and should copy it if needed. The record reference is reused. 
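+     *     A defensive copy can be made with {@code new GenericRecordBuilder(record).build()}, as
+     *     the accompanying integration test's ReadAllRows helper does.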
+     */
+    void accept(GenericData.Record record);
+  }
+
+  private final DatumReader<GenericData.Record> datumReader;
+
+  // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+  private BinaryDecoder decoder = null;
+
+  // Record object will be reused.
+  private GenericData.Record row = null;
+
+  public SimpleRowReader(Schema schema) {
+    Preconditions.checkNotNull(schema);
+    datumReader = new GenericDatumReader<>(schema);
+  }
+
+  /**
+   * Processes Avro rows by calling a consumer for each decoded row.
+   *
+   * @param avroRows object returned from the ReadRowsResponse.
+   * @param rowConsumer consumer that accepts GenericRecord.
+   */
+  void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException {
+    Preconditions.checkNotNull(avroRows);
+    Preconditions.checkNotNull(rowConsumer);
+    decoder =
+        DecoderFactory.get()
+            .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), decoder);
+
+    while (!decoder.isEnd()) {
+      row = datumReader.read(row, decoder);
+      rowConsumer.accept(row);
+    }
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java
new file mode 100644
index 000000000000..0a960e01f67d
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.WatchdogProvider; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse; +import com.google.protobuf.Empty; +import java.time.Duration; +import java.util.Set; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class EnhancedBigQueryStorageStubSettingsTest { + + @Test + void testSettingsArePreserved() { + String endpoint = "some.other.host:123"; + CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class); + Duration watchdogInterval = Duration.ofSeconds(12); + WatchdogProvider watchdogProvider = Mockito.mock(WatchdogProvider.class); + + EnhancedBigQueryStorageStubSettings.Builder builder = + EnhancedBigQueryStorageStubSettings.newBuilder() + .setEndpoint(endpoint) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogCheckIntervalDuration(watchdogInterval) + .setStreamWatchdogProvider(watchdogProvider); + + verifyBuilder(builder, endpoint, credentialsProvider, watchdogInterval, watchdogProvider); + + verifySettings( + builder.build(), endpoint, credentialsProvider, watchdogInterval, watchdogProvider); + + verifyBuilder( + builder.build().toBuilder(), + endpoint, + credentialsProvider, + watchdogInterval, + watchdogProvider); + } + + private void verifyBuilder( + EnhancedBigQueryStorageStubSettings.Builder builder, + String endpoint, + CredentialsProvider credentialsProvider, + Duration watchdogInterval, + WatchdogProvider watchdogProvider) { + assertThat(builder.getEndpoint()).isEqualTo(endpoint); + assertThat(builder.getCredentialsProvider()).isEqualTo(credentialsProvider); + assertThat(builder.getStreamWatchdogCheckIntervalDuration()).isEqualTo(watchdogInterval); + assertThat(builder.getStreamWatchdogProvider()).isEqualTo(watchdogProvider); + + InstantiatingGrpcChannelProvider channelProvider = + (InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider(); + assertThat(channelProvider.toBuilder().getMaxInboundMessageSize()).isEqualTo(Integer.MAX_VALUE); + } + + private void verifySettings( + EnhancedBigQueryStorageStubSettings settings, + String endpoint, + CredentialsProvider credentialsProvider, + Duration watchdogInterval, + WatchdogProvider watchdogProvider) { + assertThat(settings.getEndpoint()).isEqualTo(endpoint); + assertThat(settings.getCredentialsProvider()).isEqualTo(credentialsProvider); + 
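+    // The stream watchdog is the GAX component that periodically checks server-streaming calls
+    // for inactivity; the assertions below confirm the custom interval and provider are
+    // preserved on the built settings.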
assertThat(settings.getStreamWatchdogCheckIntervalDuration()).isEqualTo(watchdogInterval); + assertThat(settings.getStreamWatchdogProvider()).isEqualTo(watchdogProvider); + + InstantiatingGrpcChannelProvider channelProvider = + (InstantiatingGrpcChannelProvider) settings.getTransportChannelProvider(); + assertThat(channelProvider.toBuilder().getMaxInboundMessageSize()).isEqualTo(Integer.MAX_VALUE); + } + + @Test + void testCreateReadSessionSettings() { + UnaryCallSettings.Builder builder = + EnhancedBigQueryStorageStubSettings.newBuilder().createReadSessionSettings(); + verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); + } + + @Test + void testReadRowsSettings() { + ServerStreamingCallSettings.Builder builder = + EnhancedBigQueryStorageStubSettings.newBuilder().readRowsSettings(); + assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE); + RetrySettings retrySettings = builder.getRetrySettings(); + assertThat(retrySettings.getInitialRetryDelayDuration()).isEqualTo(Duration.ofMillis(100L)); + assertThat(retrySettings.getRetryDelayMultiplier()).isWithin(1e-6).of(1.3); + assertThat(retrySettings.getMaxRetryDelayDuration()).isEqualTo(Duration.ofMinutes(1L)); + assertThat(retrySettings.getInitialRpcTimeoutDuration()).isEqualTo(Duration.ofDays(1L)); + assertThat(retrySettings.getRpcTimeoutMultiplier()).isWithin(1e-6).of(1.0); + assertThat(retrySettings.getMaxRpcTimeoutDuration()).isEqualTo(Duration.ofDays(1L)); + assertThat(retrySettings.getTotalTimeoutDuration()).isEqualTo(Duration.ofDays(1L)); + assertThat(builder.getIdleTimeoutDuration()).isEqualTo(Duration.ZERO); + } + + @Test + void testBatchCreateReadSessionStreamsSettings() { + UnaryCallSettings.Builder< + BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> + builder = + EnhancedBigQueryStorageStubSettings.newBuilder() + .batchCreateReadSessionStreamsSettings(); + verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); + } + + @Test + void testFinalizeStreamSettings() { + UnaryCallSettings.Builder builder = + EnhancedBigQueryStorageStubSettings.newBuilder().finalizeStreamSettings(); + verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); + } + + @Test + void testSplitReadStreamSettings() { + UnaryCallSettings.Builder builder = + EnhancedBigQueryStorageStubSettings.newBuilder().splitReadStreamSettings(); + verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); + } + + private void verifyRetrySettings(Set retryCodes, RetrySettings retrySettings) { + assertThat(retryCodes).contains(Code.UNAVAILABLE); + assertThat(retrySettings.getTotalTimeoutDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getInitialRetryDelayDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getRetryDelayMultiplier()).isAtLeast(1.0); + assertThat(retrySettings.getMaxRetryDelayDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getInitialRpcTimeoutDuration()).isGreaterThan(Duration.ZERO); + assertThat(retrySettings.getRpcTimeoutMultiplier()).isAtLeast(1.0); + assertThat(retrySettings.getMaxRpcTimeoutDuration()).isGreaterThan(Duration.ZERO); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java new file mode 100644 index 000000000000..6e35db1303bb --- 
/dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java @@ -0,0 +1,174 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta1.stub; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.api.gax.rpc.UnimplementedException; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageGrpc.BigQueryStorageImplBase; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageSettings; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession; +import com.google.cloud.bigquery.storage.v1beta1.Storage.Stream; +import com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition; +import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference; +import java.util.regex.Pattern; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class ResourceHeaderTest { + + private static final TableReference TEST_TABLE_REFERENCE = + TableReference.newBuilder() + .setProjectId("project") + .setDatasetId("dataset") + .setTableId("table") + .build(); + + private static final ReadSession TEST_SESSION = + ReadSession.newBuilder().setName("sessionName").build(); + + private static final Stream TEST_STREAM = Stream.newBuilder().setName("streamName").build(); + + private static final String NAME = "resource-header-test:123-v1beta1"; + + private static final String HEADER_NAME = "x-goog-request-params"; + + private static final Pattern DATASET_PATTERN = + Pattern.compile(".*" + "table_reference\\.dataset_id=dataset" + ".*"); + private static final Pattern ORIGINAL_STREAM_PATTERN = + Pattern.compile(".*" + "original_stream\\.name=streamName" + ".*"); + private static final Pattern PROJECT_PATTERN = + Pattern.compile(".*" + "table_reference\\.project_id=project" + ".*"); + private static final Pattern READ_POSITION_PATTERN = + Pattern.compile(".*" + "read_position\\.stream\\.name=streamName" + ".*"); + private static final Pattern SESSION_PATTERN = + Pattern.compile(".*" + "session\\.name=sessionName" + ".*"); + private static final Pattern STREAM_PATTERN = + Pattern.compile(".*" + "stream\\.name=streamName" + ".*"); + + private static final String TEST_HEADER_NAME = "simple-header-name"; + private static final String TEST_HEADER_VALUE = "simple-header-value"; + private static final Pattern TEST_PATTERN = Pattern.compile(".*" + 
TEST_HEADER_VALUE + ".*"); + + private static InProcessServer server; + + private LocalChannelProvider channelProvider; + private BigQueryStorageClient client; + + @BeforeAll + public static void setUpClass() throws Exception { + server = new InProcessServer<>(new BigQueryStorageImplBase() {}, NAME); + server.start(); + } + + @BeforeEach + void setUp() throws Exception { + channelProvider = LocalChannelProvider.create(NAME); + BigQueryStorageSettings.Builder settingsBuilder = + BigQueryStorageSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setHeaderProvider(FixedHeaderProvider.create(TEST_HEADER_NAME, TEST_HEADER_VALUE)) + .setTransportChannelProvider(channelProvider); + client = BigQueryStorageClient.create(settingsBuilder.build()); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + } + + @AfterAll + static void tearDownClass() throws Exception { + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void createReadSessionTest() { + try { + client.createReadSession(TEST_TABLE_REFERENCE, "parents/project", 1); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + verifyHeaderSent(PROJECT_PATTERN, DATASET_PATTERN); + } + + @Test + void readRowsTest() { + try { + ReadRowsRequest request = + ReadRowsRequest.newBuilder() + .setReadPosition(StreamPosition.newBuilder().setStream(TEST_STREAM).setOffset(125)) + .build(); + client.readRowsCallable().call(request); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(READ_POSITION_PATTERN); + } + + @Test + void batchCreateReadStreamsForSessionTest() { + try { + client.batchCreateReadSessionStreams(TEST_SESSION, 1); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(SESSION_PATTERN); + } + + @Test + void finalizeStreamTest() { + try { + client.finalizeStream(TEST_STREAM); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(STREAM_PATTERN); + } + + @Test + void splitReadStreamTest() { + try { + client.splitReadStream(TEST_STREAM); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(ORIGINAL_STREAM_PATTERN); + } + + private void verifyHeaderSent(Pattern... 
patterns) { + for (Pattern pattern : patterns) { + boolean headerSent = channelProvider.isHeaderSent(HEADER_NAME, pattern); + assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + } + boolean testHeaderSent = channelProvider.isHeaderSent(TEST_HEADER_NAME, TEST_PATTERN); + assertWithMessage("Provided header was sent").that(testHeaderSent).isTrue(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java new file mode 100644 index 000000000000..b75cb8aeab65 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java @@ -0,0 +1,250 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta1.stub.readrows; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageGrpc.BigQueryStorageImplBase; +import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageSettings; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta1.Storage.Stream; +import com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition; +import com.google.common.collect.Queues; +import io.grpc.Status.Code; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.List; +import java.util.Queue; +import java.util.UUID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class ReadRowsRetryTest { + + private static final Stream DEFAULT_STREAM = Stream.newBuilder().setName("defaultStream").build(); + + private TestBigQueryStorageService service; + private BigQueryStorageClient client; + private InProcessServer server; + private LocalChannelProvider channelProvider; + + @BeforeEach + void setUp() throws Exception { + service = new TestBigQueryStorageService(); + String serverName = UUID.randomUUID().toString(); + server = new InProcessServer<>(service, serverName); + server.start(); + channelProvider = LocalChannelProvider.create(serverName); + + BigQueryStorageSettings settings = + BigQueryStorageSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + 
.setTransportChannelProvider(channelProvider) + .build(); + + client = BigQueryStorageClient.create(settings); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void happyPathTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7)); + + assertEquals(17, getRowCount(request)); + } + + @Test + void immediateRetryTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7)); + + assertEquals(17, getRowCount(request)); + } + + @Test + void multipleRetryTestWithZeroInitialOffset() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(5) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 5) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6)); + + assertEquals(28, getRowCount(request)); + } + + @Test + void multipleRetryTestWithNonZeroInitialOffset() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 17) + .respondWithNumberOfRows(5) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 22) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3)); + + assertEquals(25, getRowCount(request)); + } + + @Test + void errorAtTheVeryEndTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0)); + + assertEquals(17, getRowCount(request)); + } + + private int getRowCount(ReadRowsRequest request) { + ServerStream serverStream = client.readRowsCallable().call(request); + int rowCount = 0; + for (ReadRowsResponse readRowsResponse : serverStream) { + rowCount += readRowsResponse.getRowCount(); + } + return rowCount; + } + + private static class TestBigQueryStorageService extends BigQueryStorageImplBase { + + Queue expectations = Queues.newArrayDeque(); + int currentRequestIndex = -1; + + @Override + public void readRows( + ReadRowsRequest request, StreamObserver responseObserver) { + + RpcExpectation expectedRpc = expectations.poll(); + currentRequestIndex++; + + assertNotNull( + expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString()); + assertEquals( + 
expectedRpc.expectedRequest, + request, + "Expected request #" + + currentRequestIndex + + " does not match actual request: " + + request.toString()); + for (ReadRowsResponse response : expectedRpc.responses) { + responseObserver.onNext(response); + } + + if (expectedRpc.statusCode.toStatus().isOk()) { + responseObserver.onCompleted(); + } else { + responseObserver.onError(expectedRpc.statusCode.toStatus().asRuntimeException()); + } + } + } + + private static class RpcExpectation { + + ReadRowsRequest expectedRequest; + Code statusCode; + List responses; + + private RpcExpectation() { + statusCode = Code.OK; + responses = new ArrayList<>(); + } + + static RpcExpectation create() { + return new RpcExpectation(); + } + + static ReadRowsRequest createRequest(String streamName, long offset) { + return ReadRowsRequest.newBuilder() + .setReadPosition( + StreamPosition.newBuilder() + .setStream(Stream.newBuilder().setName(streamName)) + .setOffset(offset)) + .build(); + } + + static ReadRowsResponse createResponse(int numberOfRows) { + return ReadRowsResponse.newBuilder().setRowCount(numberOfRows).build(); + } + + RpcExpectation expectRequest(String streamName, long offset) { + expectedRequest = createRequest(streamName, offset); + return this; + } + + RpcExpectation respondWithNumberOfRows(int numberOfRows) { + responses.add(createResponse(numberOfRows)); + return this; + } + + RpcExpectation respondWithStatus(Code code) { + this.statusCode = code; + return this; + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java new file mode 100644 index 000000000000..abf8927eb33d --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java @@ -0,0 +1,285 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class BaseBigQueryReadClientTest { + private static MockBigQueryRead mockBigQueryRead; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private BaseBigQueryReadClient client; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryRead = new MockBigQueryRead(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryRead)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + BaseBigQueryReadSettings settings = + BaseBigQueryReadSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BaseBigQueryReadClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createReadSessionTest() throws Exception { + ReadSession expectedResponse = + ReadSession.newBuilder() + .setName("name3373707") + .setExpireTime(Timestamp.newBuilder().build()) + .setDataFormat(DataFormat.forNumber(0)) + .setTable(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .setTableModifiers(ReadSession.TableModifiers.newBuilder().build()) + .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) + .addAllStreams(new ArrayList()) + .build(); + mockBigQueryRead.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = ((CreateReadSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(readSession, actualRequest.getReadSession()); + Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + client.createReadSession(parent, readSession, maxStreamCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createReadSessionTest2() throws Exception { + ReadSession expectedResponse = + ReadSession.newBuilder() + .setName("name3373707") + .setExpireTime(Timestamp.newBuilder().build()) + .setDataFormat(DataFormat.forNumber(0)) + .setTable(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString()) + .setTableModifiers(ReadSession.TableModifiers.newBuilder().build()) + .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) + .addAllStreams(new ArrayList()) + .build(); + mockBigQueryRead.addResponse(expectedResponse); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = ((CreateReadSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(readSession, actualRequest.getReadSession()); + Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createReadSessionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + client.createReadSession(parent, readSession, maxStreamCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
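+      // (GAX translates the mock server's INVALID_ARGUMENT gRPC status into an
+      // InvalidArgumentException, so landing in this catch block is the success path.)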
+ } + } + + @Test + public void readRowsTest() throws Exception { + ReadRowsResponse expectedResponse = + ReadRowsResponse.newBuilder() + .setRowCount(1340416618) + .setStats(StreamStats.newBuilder().build()) + .setThrottleState(ThrottleState.newBuilder().build()) + .build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = + ReadRowsRequest.newBuilder() + .setReadStream( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setOffset(-1019779949) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + ReadRowsRequest request = + ReadRowsRequest.newBuilder() + .setReadStream( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setOffset(-1019779949) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void splitReadStreamTest() throws Exception { + SplitReadStreamResponse expectedResponse = + SplitReadStreamResponse.newBuilder() + .setPrimaryStream(ReadStream.newBuilder().build()) + .setRemainderStream(ReadStream.newBuilder().build()) + .build(); + mockBigQueryRead.addResponse(expectedResponse); + + SplitReadStreamRequest request = + SplitReadStreamRequest.newBuilder() + .setName( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setFraction(-1653751294) + .build(); + + SplitReadStreamResponse actualResponse = client.splitReadStream(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SplitReadStreamRequest actualRequest = ((SplitReadStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getName(), actualRequest.getName()); + Assert.assertEquals(request.getFraction(), actualRequest.getFraction(), 0.0001); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void splitReadStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + SplitReadStreamRequest request = + SplitReadStreamRequest.newBuilder() + .setName( + ReadStreamName.of("[PROJECT]", "[LOCATION]", "[SESSION]", "[STREAM]").toString()) + .setFraction(-1653751294) + .build(); + 
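+      // The mock service was primed with an exception above, so this call is expected to throw.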
client.splitReadStream(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java new file mode 100644 index 000000000000..3d35b7764c0e --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.protobuf.ByteString; +import java.math.BigDecimal; +import org.junit.jupiter.api.Test; + +class BigDecimalByteStringEncoderTest { + @Test + void testEncodeBigDecimalAndDecodeByteString() { + BigDecimal testBD = new BigDecimal("0"); // expected value + ByteString testBS = + BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); // encode to a ByteString + BigDecimal resultBD = + BigDecimalByteStringEncoder.decodeNumericByteString(testBS); // decode back to a BigDecimal + assertEquals(0, resultBD.compareTo(testBD)); // compareTo == 0: the round trip preserved the value (scale may differ) + + testBD = new BigDecimal("1.2"); + testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); + resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); + assertEquals(0, resultBD.compareTo(testBD)); // round trip preserves the value + + testBD = new BigDecimal("-1.2"); + testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); + resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); + assertEquals(0, resultBD.compareTo(testBD)); // round trip preserves the value + + testBD = new BigDecimal("99999999999999999999999999999.999999999"); + testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); + resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); + assertEquals(0, resultBD.compareTo(testBD)); // round trip preserves the value + + testBD = new BigDecimal("-99999999999999999999999999999.999999999"); + testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); + resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); + assertEquals(0, resultBD.compareTo(testBD)); // round trip preserves the value + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java new file mode 100644 index 000000000000..02172a305d78 --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java @@ -0,0 +1,335 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.InternalException; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ResourceExhaustedException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Duration; +import com.google.protobuf.Parser; +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class BigQueryReadClientTest { + private static MockBigQueryRead mockBigQueryRead; + private static MockServiceHelper serviceHelper; + private BigQueryReadClient client; + private LocalChannelProvider channelProvider; + private int retryCount; + private Code lastRetryStatusCode; + + @BeforeAll + static void startStaticServer() { + mockBigQueryRead = new MockBigQueryRead(); + serviceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryRead)); + serviceHelper.start(); + } + + @AfterAll + static void stopServer() { + serviceHelper.stop(); + } + + @BeforeEach + void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + retryCount = 0; + lastRetryStatusCode = Code.OK; + BigQueryReadSettings settings = + BigQueryReadSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setReadRowsRetryAttemptListener( + 
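+ // This listener is invoked on each automatic readRows retry; the test records
+ // the attempt count and the gRPC status that triggered the retry.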
new BigQueryReadSettings.RetryAttemptListener() { + @Override + public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) { + synchronized (this) { + retryCount += 1; + lastRetryStatusCode = prevStatus.getCode(); + } + } + }) + .build(); + client = BigQueryReadClient.create(settings); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + + @Test + @SuppressWarnings("all") + void createReadSessionTest() { + String name = "name3373707"; + String table = "table110115790"; + ReadSession expectedResponse = ReadSession.newBuilder().setName(name).setTable(table).build(); + mockBigQueryRead.addResponse(expectedResponse); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); + + assertEquals(parent, actualRequest.getParent()); + assertEquals(readSession, actualRequest.getReadSession()); + assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + assertThrows( + InvalidArgumentException.class, + () -> client.createReadSession(parent, readSession, maxStreamCount)); + } + + @Test + @SuppressWarnings("all") + void readRowsTest() throws Exception { + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + assertEquals(expectedResponse, actualResponses.get(0)); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); + } + + @Test + @SuppressWarnings("all") + void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + 
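+ // The GAX status code mirrors the gRPC status that was queued on the mock server.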
assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); + } + + @Test + @SuppressWarnings("all") + void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "Received unexpected EOS on DATA frame from server")), + GrpcStatusCode.of(Code.INTERNAL), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); + } + + @Test + @SuppressWarnings("all") + void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "HTTP/2 error code: INTERNAL_ERROR\nReceived Rst Stream")), + GrpcStatusCode.of(Code.INTERNAL), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); + } + + @Test + @SuppressWarnings("all") + void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() + throws ExecutionException, InterruptedException { + ApiException exception = + new ResourceExhaustedException( + new StatusRuntimeException( + Status.RESOURCE_EXHAUSTED.withDescription("You are out of quota X")), + GrpcStatusCode.of(Code.RESOURCE_EXHAUSTED), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof ResourceExhaustedException); + ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); + assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, 
Code.OK); + } + + @Test + @SuppressWarnings("all") + void readRowsNoRetryForResourceExhaustedWithRetryInfo() + throws ExecutionException, InterruptedException { + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(Duration.newBuilder().setSeconds(2).setNanos(456).build()) + .build(); + + Metadata metadata = new Metadata(); + metadata.put( + Metadata.Key.of( + "google.rpc.retryinfo-bin", + new Metadata.BinaryMarshaller() { + @Override + public byte[] toBytes(RetryInfo value) { + return value.toByteArray(); + } + + @Override + public RetryInfo parseBytes(byte[] serialized) { + try { + Parser parser = (RetryInfo.newBuilder().build()).getParserForType(); + return parser.parseFrom(serialized); + } catch (Exception e) { + return null; + } + } + }), + retryInfo); + + ApiException exception = + new ResourceExhaustedException( + new StatusRuntimeException( + Status.RESOURCE_EXHAUSTED.withDescription("Try again in a bit"), metadata), + GrpcStatusCode.of(Code.RESOURCE_EXHAUSTED), + /* retryable= */ false); + mockBigQueryRead.addException(exception); + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + List actualResponses = responseObserver.future().get(); + assertEquals(1, actualResponses.size()); + + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java new file mode 100644 index 000000000000..eccb9ed65804 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java @@ -0,0 +1,500 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class BigQueryWriteClientTest { + private static MockBigQueryWrite mockBigQueryWrite; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private BigQueryWriteClient client; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryWrite = new MockBigQueryWrite(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + BigQueryWriteSettings settings = + BigQueryWriteSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BigQueryWriteClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createWriteStreamTest() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + WriteStream writeStream = WriteStream.newBuilder().build(); + + WriteStream actualResponse = client.createWriteStream(parent, writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateWriteStreamRequest actualRequest = ((CreateWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void 
createWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); + WriteStream writeStream = WriteStream.newBuilder().build(); + client.createWriteStream(parent, writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createWriteStreamTest2() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String parent = "parent-995424086"; + WriteStream writeStream = WriteStream.newBuilder().build(); + + WriteStream actualResponse = client.createWriteStream(parent, writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateWriteStreamRequest actualRequest = ((CreateWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String parent = "parent-995424086"; + WriteStream writeStream = WriteStream.newBuilder().build(); + client.createWriteStream(parent, writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
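+ // The *Test2 variants exercise the plain-String overloads of the same RPCs.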
+ } + } + + @Test + public void appendRowsTest() throws Exception { + AppendRowsResponse expectedResponse = + AppendRowsResponse.newBuilder().setUpdatedSchema(TableSchema.newBuilder().build()).build(); + mockBigQueryWrite.addResponse(expectedResponse); + AppendRowsRequest request = + AppendRowsRequest.newBuilder() + .setWriteStream( + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setOffset(Int64Value.newBuilder().build()) + .setTraceId("traceId-1067401920") + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void appendRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + AppendRowsRequest request = + AppendRowsRequest.newBuilder() + .setWriteStream( + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setOffset(Int64Value.newBuilder().build()) + .setTraceId("traceId-1067401920") + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getWriteStreamTest() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + WriteStream actualResponse = client.getWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetWriteStreamRequest actualRequest = ((GetWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.getWriteStream(name); + 
Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getWriteStreamTest2() throws Exception { + WriteStream expectedResponse = + WriteStream.newBuilder() + .setName(WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setCommitTime(Timestamp.newBuilder().build()) + .setTableSchema(TableSchema.newBuilder().build()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String name = "name3373707"; + + WriteStream actualResponse = client.getWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetWriteStreamRequest actualRequest = ((GetWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String name = "name3373707"; + client.getWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void finalizeWriteStreamTest() throws Exception { + FinalizeWriteStreamResponse expectedResponse = + FinalizeWriteStreamResponse.newBuilder().setRowCount(1340416618).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FinalizeWriteStreamRequest actualRequest = ((FinalizeWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeWriteStreamExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.finalizeWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void finalizeWriteStreamTest2() throws Exception { + FinalizeWriteStreamResponse expectedResponse = + FinalizeWriteStreamResponse.newBuilder().setRowCount(1340416618).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String name = "name3373707"; + + FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FinalizeWriteStreamRequest actualRequest = ((FinalizeWriteStreamRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void finalizeWriteStreamExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String name = "name3373707"; + client.finalizeWriteStream(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void batchCommitWriteStreamsTest() throws Exception { + BatchCommitWriteStreamsResponse expectedResponse = + BatchCommitWriteStreamsResponse.newBuilder() + .setCommitTime(Timestamp.newBuilder().build()) + .addAllStreamErrors(new ArrayList()) + .build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCommitWriteStreamsRequest actualRequest = + ((BatchCommitWriteStreamsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCommitWriteStreamsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String parent = "parent-995424086"; + client.batchCommitWriteStreams(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void flushRowsTest() throws Exception { + FlushRowsResponse expectedResponse = + FlushRowsResponse.newBuilder().setOffset(-1019779949).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + WriteStreamName writeStream = + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + + FlushRowsResponse actualResponse = client.flushRows(writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FlushRowsRequest actualRequest = ((FlushRowsRequest) actualRequests.get(0)); + + Assert.assertEquals(writeStream.toString(), actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void flushRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + WriteStreamName writeStream = + WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); + client.flushRows(writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void flushRowsTest2() throws Exception { + FlushRowsResponse expectedResponse = + FlushRowsResponse.newBuilder().setOffset(-1019779949).build(); + mockBigQueryWrite.addResponse(expectedResponse); + + String writeStream = "writeStream1412231231"; + + FlushRowsResponse actualResponse = client.flushRows(writeStream); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryWrite.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + FlushRowsRequest actualRequest = ((FlushRowsRequest) actualRequests.get(0)); + + Assert.assertEquals(writeStream, actualRequest.getWriteStream()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void flushRowsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + + try { + String writeStream = "writeStream1412231231"; + client.flushRows(writeStream); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java new file mode 100644 index 000000000000..c147e00be81f --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryRead implements MockGrpcService { + private final MockBigQueryReadImpl serviceImpl; + + public MockBigQueryRead() { + serviceImpl = new MockBigQueryReadImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java new file mode 100644 index 000000000000..a5cefb788f6a --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java @@ -0,0 +1,122 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadGrpc.BigQueryReadImplBase; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryReadImpl extends BigQueryReadImplBase { + private List requests; + private Queue responses; + + public MockBigQueryReadImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createReadSession( + CreateReadSessionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ReadSession) { + requests.add(request); + responseObserver.onNext(((ReadSession) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateReadSession, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ReadSession.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void readRows(ReadRowsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ReadRowsResponse) { + requests.add(request); + responseObserver.onNext(((ReadRowsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ReadRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ReadRowsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void splitReadStream( + SplitReadStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof SplitReadStreamResponse) { + requests.add(request); + responseObserver.onNext(((SplitReadStreamResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SplitReadStream, expected %s or %s", + response == null ? 
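+ // (either nothing was queued, so poll() returned null, or the queued object has the wrong type)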
"null" : response.getClass().getName(), + SplitReadStreamResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java new file mode 100644 index 000000000000..545a0dbae9e6 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryWrite implements MockGrpcService { + private final MockBigQueryWriteImpl serviceImpl; + + public MockBigQueryWrite() { + serviceImpl = new MockBigQueryWriteImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java new file mode 100644 index 000000000000..04c596ff526e --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java @@ -0,0 +1,205 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryWriteGrpc.BigQueryWriteImplBase; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockBigQueryWriteImpl extends BigQueryWriteImplBase { + private List requests; + private Queue responses; + + public MockBigQueryWriteImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createWriteStream( + CreateWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext(((WriteStream) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateWriteStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + WriteStream.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public StreamObserver appendRows( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(AppendRowsRequest value) { + requests.add(value); + final Object response = responses.remove(); + if (response instanceof AppendRowsResponse) { + responseObserver.onNext(((AppendRowsResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method AppendRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + AppendRowsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override + public void getWriteStream( + GetWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext(((WriteStream) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetWriteStream, expected %s or %s", + response == null ? 
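+ // (responses and exceptions share one FIFO queue; this branch reports an empty or mismatched poll)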
"null" : response.getClass().getName(), + WriteStream.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void finalizeWriteStream( + FinalizeWriteStreamRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof FinalizeWriteStreamResponse) { + requests.add(request); + responseObserver.onNext(((FinalizeWriteStreamResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FinalizeWriteStream, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + FinalizeWriteStreamResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchCommitWriteStreamsResponse) { + requests.add(request); + responseObserver.onNext(((BatchCommitWriteStreamsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + BatchCommitWriteStreamsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void flushRows( + FlushRowsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof FlushRowsResponse) { + requests.add(request); + responseObserver.onNext(((FlushRowsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method FlushRows, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + FlushRowsResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/BigQueryResource.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/BigQueryResource.java new file mode 100644 index 000000000000..64f58d3bfedf --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/BigQueryResource.java @@ -0,0 +1,34 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigquery.storage.v1beta2.it; + +/** Test helper class to generate BigQuery resource paths. */ +public class BigQueryResource { + + /** + * Returns a BigQuery table resource path built from the provided parameters, in the format: + * projects/{projectId}/datasets/{datasetId}/tables/{tableId} + * + * @param projectId the project that owns the table + * @param datasetId the dataset that contains the table + * @param tableId the table's ID + * @return a path to a table resource. + */ + public static String FormatTableResource(String projectId, String datasetId, String tableId) { + return String.format("projects/%s/datasets/%s/tables/%s", projectId, datasetId, tableId); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java new file mode 100644 index 000000000000..636132224837 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java @@ -0,0 +1,143 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.it; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1beta2.DataFormat; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.ReadStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.logging.Logger; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +/** + * Integration tests for the BigQuery Storage API that target long-running sessions. These tests + * are skipped by default; enable them by setting the system property + * 'bigquery.storage.enable_long_running_tests' to true. + */ +class ITBigQueryStorageLongRunningTest { + + private static final Logger LOG = + Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName()); + + private static final String LONG_TESTS_ENABLED_PROPERTY = + "bigquery.storage.enable_long_running_tests"; + + private static final String LONG_TESTS_DISABLED_MESSAGE = + String.format( + "BigQuery Storage long running tests are not enabled and will be skipped. 
" + + "To enable them, set system property '%s' to true.", + LONG_TESTS_ENABLED_PROPERTY); + + private static BigQueryReadClient client; + private static String parentProjectId; + + @BeforeAll + static void beforeAll() throws IOException { + Assumptions.assumeTrue( + Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE); + client = BigQueryReadClient.create(); + parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); + + LOG.info( + String.format( + "%s tests running with parent project: %s", + ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId)); + } + + @AfterAll + static void afterAll() { + if (client != null) { + client.close(); + } + } + + @Test + void testLongRunningReadSession() throws InterruptedException, ExecutionException { + // This test reads a larger table with the goal of doing a simple validation of timeout settings + // for a longer running session. + + String table = + BigQueryResource.FormatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "wikipedia"); + + ReadSession session = + client.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 5); + + assertEquals( + 5, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + List> tasks = new ArrayList<>(session.getStreamsCount()); + for (final ReadStream stream : session.getStreamsList()) { + tasks.add(() -> readAllRowsFromStream(stream)); + } + + ExecutorService executor = Executors.newFixedThreadPool(tasks.size()); + List> results = executor.invokeAll(tasks); + + long rowCount = 0; + for (Future result : results) { + rowCount += result.get(); + } + + assertEquals(313_797_035, rowCount); + } + + private long readAllRowsFromStream(ReadStream readStream) { + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(readStream.getName()).build(); + + long rowCount = 0; + ServerStream serverStream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : serverStream) { + rowCount += response.getRowCount(); + } + + LOG.info( + String.format("Read total of %d rows from stream '%s'.", rowCount, readStream.getName())); + return rowCount; + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java new file mode 100644 index 000000000000..3d59ba40c0ea --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java @@ -0,0 +1,1311 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2.it; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.UnauthenticatedException; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.RetryOption; +import com.google.cloud.ServiceOptions; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Field.Mode; +import com.google.cloud.bigquery.Job; +import com.google.cloud.bigquery.JobInfo; +import com.google.cloud.bigquery.JobInfo.WriteDisposition; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.TimePartitioning; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.DataFormat; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions; +import com.google.cloud.bigquery.storage.v1beta2.ReadStream; +import com.google.cloud.bigquery.storage.v1beta2.it.SimpleRowReader.AvroRowConsumer; +import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; +import com.google.common.base.Preconditions; +import com.google.protobuf.Timestamp; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.avro.Conversions; +import org.apache.avro.LogicalTypes; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericRecordBuilder; +import org.apache.avro.util.Utf8; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +/** Integration tests for BigQuery Storage API. 
*/ +class ITBigQueryStorageTest { + + private static final Logger LOG = Logger.getLogger(ITBigQueryStorageTest.class.getName()); + private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); + private static final String DESCRIPTION = "BigQuery Storage Java client test dataset"; + + private static BigQueryReadClient client; + private static String parentProjectId; + private static BigQuery bigquery; + + private static final String FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + "0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + "0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"project_id\": \"someprojectid\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\",\n" + + " \"universe_domain\": \"googleapis.com\"\n" + + "}"; + + private static final String FAKE_JSON_CRED_WITH_INVALID_DOMAIN = + "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\n" + + "MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+K2hSuFpAdrJI\\n" + + "nCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHgaR\\n" + + "0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\n" + + "QP/9dJfIkIDJ9Fw9N4Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "knddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2LgczOjwWHGi99MFjxSer5m9\\n" + + "1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa\\n" + + "dYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n" + + 
"0S31xIe3sSlgW0+UbYlF4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\n" + + "r6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvLsKupSeWAW4tMj3eo/64ge\\n" + + "sdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\n" + + "82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\n" + + "CdDw/0jmZTEjpe4S1lxfHplAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n" + + "5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FFJlbXSRsJMf/Qq39mOR2\\n" + + "SpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\n" + + "mYPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\n" + + "gUIi9REwXlGDW0Mz50dxpxcKCAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n" + + "3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdFCd2UoGddYaOF+KNeM\\n" + + "HC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\n" + + "ECR8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\n" + + "coOvtreXCX6XqfrWDtKIvv0vjlHBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\n" + + "kndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa2AY7eafmoU/nZPT\\n" + + "00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\n" + + "J7gSidI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\n" + + "EfeFCoOX75MxKwXs6xgrw4W//AYGGUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\n" + + "HtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKkXyRDW4IG1Oa2p\\n" + + "rALStNBx5Y9t0/LQnFI4w3aG\\n" + + "-----END PRIVATE KEY-----\\n" + + "\",\n" + + " \"project_id\": \"someprojectid\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\",\n" + + " \"universe_domain\": \"fake.domain\"\n" + + "}"; + + @BeforeAll + static void beforeAll() throws IOException { + client = BigQueryReadClient.create(); + parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); + + LOG.info( + String.format( + "%s tests running with parent project: %s", + ITBigQueryStorageTest.class.getSimpleName(), parentProjectId)); + + RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); + bigquery = bigqueryHelper.getOptions().getService(); + DatasetInfo datasetInfo = + DatasetInfo.newBuilder(/* datasetId= */ DATASET).setDescription(DESCRIPTION).build(); + bigquery.create(datasetInfo); + LOG.info("Created test dataset: " + DATASET); + } + + @AfterAll + static void afterAll() { + if (client != null) { + client.close(); + } + + if (bigquery != null) { + RemoteBigQueryHelper.forceDelete(bigquery, DATASET); + LOG.info("Deleted test dataset: " + DATASET); + } + } + + @Test + void testSimpleRead() { + String table = + BigQueryResource.FormatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + ReadSession session = + client.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + long rowCount = 0; + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + } + + assertEquals(164_656, rowCount); + } + 
+  @Test
+  void testSimpleReadArrow() {
+    String table =
+        com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource.formatTableResource(
+            /* projectId= */ "bigquery-public-data",
+            /* datasetId= */ "samples",
+            /* tableId= */ "shakespeare");
+
+    ReadSession session =
+        client.createReadSession(
+            /* parent= */ parentProjectId,
+            /* readSession= */ ReadSession.newBuilder()
+                .setTable(table)
+                .setDataFormat(DataFormat.ARROW)
+                .build(),
+            /* maxStreamCount= */ 1);
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    // Assert that there are streams available in the session. An empty table may not have
+    // data available. If no sessions are available for an anonymous (cached) table, consider
+    // writing results of a query to a named table rather than consuming cached results
+    // directly.
+    Preconditions.checkState(session.getStreamsCount() > 0);
+
+    // Use the first stream to perform reading.
+    String streamName = session.getStreams(0).getName();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+    long rowCount = 0;
+    // Process each block of rows as they arrive and decode using our simple row reader.
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      Preconditions.checkState(response.hasArrowRecordBatch());
+      rowCount += response.getRowCount();
+    }
+    assertEquals(164_656, rowCount);
+  }
+
+  @Test
+  void testRangeType() throws InterruptedException {
+    // Create table with Range values.
+    String tableName = "test_range_type" + UUID.randomUUID().toString().substring(0, 8);
+    TableId tableId = TableId.of(DATASET, tableName);
+    QueryJobConfiguration createTable =
+        QueryJobConfiguration.newBuilder(
+                String.format(
+                    "CREATE TABLE %s AS SELECT RANGE(DATE '2020-01-01', DATE '2020-12-31') as date,"
+                        + " \n"
+                        + "RANGE(DATETIME '2020-01-01T12:00:00', DATETIME '2020-12-31T12:00:00') as"
+                        + " datetime, \n"
+                        + "RANGE(TIMESTAMP '2014-01-01 07:00:00.000000+00:00', TIMESTAMP"
+                        + " '2015-01-01 07:00:00.000000+00:00') as timestamp",
+                    tableName))
+            .setDefaultDataset(DatasetId.of(DATASET))
+            .setUseLegacySql(false)
+            .build();
+    bigquery.query(createTable);
+
+    String table =
+        com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource.formatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ tableId.getTable());
+
+    ReadSession session =
+        client.createReadSession(
+            /* parent= */ parentProjectId,
+            /* readSession= */ ReadSession.newBuilder()
+                .setTable(table)
+                .setDataFormat(DataFormat.ARROW)
+                .build(),
+            /* maxStreamCount= */ 1);
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    // Assert that there are streams available in the session. An empty table may not have
+    // data available. If no sessions are available for an anonymous (cached) table, consider
+    // writing results of a query to a named table rather than consuming cached results
+    // directly.
+    Preconditions.checkState(session.getStreamsCount() > 0);
+
+    // Use the first stream to perform reading.
+    String streamName = session.getStreams(0).getName();
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      Preconditions.checkState(response.hasArrowRecordBatch());
+      rowCount += response.getRowCount();
+    }
+    assertEquals(1, rowCount);
+  }
+
+  @Test
+  void testSimpleReadAndResume() {
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ "bigquery-public-data",
+            /* datasetId= */ "samples",
+            /* tableId= */ "shakespeare");
+
+    ReadSession session =
+        client.createReadSession(
+            /* parent= */ parentProjectId,
+            /* readSession= */ ReadSession.newBuilder()
+                .setTable(table)
+                .setDataFormat(DataFormat.AVRO)
+                .build(),
+            /* maxStreamCount= */ 1);
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    // We have to read some number of rows in order to be able to resume.
+    long rowCount = ReadStreamToOffset(session.getStreams(0), /* rowOffset= */ 34_846);
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder()
+            .setReadStream(session.getStreams(0).getName())
+            .setOffset(rowCount)
+            .build();
+
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+
+    for (ReadRowsResponse response : stream) {
+      rowCount += response.getRowCount();
+    }
+
+    // Verifies that the number of rows skipped and read equals the total number of rows in the
+    // table.
+    assertEquals(164_656, rowCount);
+  }
+
+  @Test
+  void testFilter() throws IOException {
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ "bigquery-public-data",
+            /* datasetId= */ "samples",
+            /* tableId= */ "shakespeare");
+
+    TableReadOptions options =
+        TableReadOptions.newBuilder().setRowRestriction("word_count > 100").build();
+
+    CreateReadSessionRequest request =
+        CreateReadSessionRequest.newBuilder()
+            .setParent(parentProjectId)
+            .setMaxStreamCount(1)
+            .setReadSession(
+                ReadSession.newBuilder()
+                    .setTable(table)
+                    .setReadOptions(options)
+                    .setDataFormat(DataFormat.AVRO)
+                    .build())
+            .build();
+
+    ReadSession session = client.createReadSession(request);
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+
+    SimpleRowReader reader =
+        new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+    long rowCount = 0;
+
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      rowCount += response.getRowCount();
+      reader.processRows(
+          response.getAvroRows(),
+          new AvroRowConsumer() {
+            @Override
+            public void accept(GenericData.Record record) {
+              Long wordCount = (Long) record.get("word_count");
+              assertWithMessage("Row not matching expectations: %s", record.toString())
+                  .that(wordCount)
+                  .isGreaterThan(100L);
+            }
+          });
+    }
+
+    assertEquals(1_333, rowCount);
+  }
+
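+  // The row restriction above ("word_count > 100") is a SQL-like predicate applied server
+  // side, so only matching rows are transmitted; the 1_333 assertion counts the shakespeare
+  // rows that satisfy it.
+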
/* tableId= */ "shakespeare"); + + TableReadOptions options = + TableReadOptions.newBuilder() + .addSelectedFields("word") + .addSelectedFields("word_count") + .setRowRestriction("word_count > 100") + .build(); + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setMaxStreamCount(1) + .setReadSession( + ReadSession.newBuilder() + .setTable(table) + .setReadOptions(options) + .setDataFormat(DataFormat.AVRO) + .build()) + .build(); + + ReadSession session = client.createReadSession(request); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + Schema avroSchema = new Schema.Parser().parse(session.getAvroSchema().getSchema()); + + String actualSchemaMessage = + String.format( + "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); + assertEquals( + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); + assertEquals( + Schema.Type.LONG, + avroSchema.getField("word_count").schema().getType(), + actualSchemaMessage); + + SimpleRowReader reader = new SimpleRowReader(avroSchema); + + long rowCount = 0; + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + rowCount += response.getRowCount(); + reader.processRows( + response.getAvroRows(), + new AvroRowConsumer() { + @Override + public void accept(GenericData.Record record) { + String rowAssertMessage = + String.format("Row not matching expectations: %s", record.toString()); + + Long wordCount = (Long) record.get("word_count"); + assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L); + + Utf8 word = (Utf8) record.get("word"); + assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0); + } + }); + } + + assertEquals(1_333, rowCount); + } + + @Test + void testReadAtSnapshot() throws InterruptedException, IOException { + Field intFieldSchema = + Field.newBuilder("col", LegacySQLTypeName.INTEGER) + .setMode(Mode.REQUIRED) + .setDescription("IntegerDescription") + .build(); + com.google.cloud.bigquery.Schema tableSchema = + com.google.cloud.bigquery.Schema.of(intFieldSchema); + + TableId testTableId = TableId.of(/* dataset= */ DATASET, /* table= */ "test_read_snapshot"); + bigquery.create(TableInfo.of(testTableId, StandardTableDefinition.of(tableSchema))); + + testTableId.toString(); + + Job firstJob = + RunQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ testTableId, /* query= */ "SELECT 1 AS col"); + + Job secondJob = + RunQueryAppendJobAndExpectSuccess( + /* destinationTableId= */ testTableId, /* query= */ "SELECT 2 AS col"); + + String table = + BigQueryResource.FormatTableResource( + /* projectId= */ ServiceOptions.getDefaultProjectId(), + /* datasetId= */ DATASET, + /* tableId= */ testTableId.getTable()); + + final List rowsAfterFirstSnapshot = new ArrayList<>(); + ProcessRowsAtSnapshot( + /* table= */ table, + /* snapshotInMillis= */ firstJob.getStatistics().getEndTime(), + /* filter= */ null, + /* consumer= */ new 
+  @Test
+  void testReadAtSnapshot() throws InterruptedException, IOException {
+    Field intFieldSchema =
+        Field.newBuilder("col", LegacySQLTypeName.INTEGER)
+            .setMode(Mode.REQUIRED)
+            .setDescription("IntegerDescription")
+            .build();
+    com.google.cloud.bigquery.Schema tableSchema =
+        com.google.cloud.bigquery.Schema.of(intFieldSchema);
+
+    TableId testTableId = TableId.of(/* dataset= */ DATASET, /* table= */ "test_read_snapshot");
+    bigquery.create(TableInfo.of(testTableId, StandardTableDefinition.of(tableSchema)));
+
+    testTableId.toString();
+
+    Job firstJob =
+        RunQueryAppendJobAndExpectSuccess(
+            /* destinationTableId= */ testTableId, /* query= */ "SELECT 1 AS col");
+
+    Job secondJob =
+        RunQueryAppendJobAndExpectSuccess(
+            /* destinationTableId= */ testTableId, /* query= */ "SELECT 2 AS col");
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ testTableId.getTable());
+
+    final List<Long> rowsAfterFirstSnapshot = new ArrayList<>();
+    ProcessRowsAtSnapshot(
+        /* table= */ table,
+        /* snapshotInMillis= */ firstJob.getStatistics().getEndTime(),
+        /* filter= */ null,
+        /* consumer= */ new AvroRowConsumer() {
+          @Override
+          public void accept(GenericData.Record record) {
+            rowsAfterFirstSnapshot.add((Long) record.get("col"));
+          }
+        });
+    assertEquals(Arrays.asList(1L), rowsAfterFirstSnapshot);
+
+    final List<Long> rowsAfterSecondSnapshot = new ArrayList<>();
+    ProcessRowsAtSnapshot(
+        /* table= */ table,
+        /* snapshotInMillis= */ secondJob.getStatistics().getEndTime(),
+        /* filter= */ null,
+        /* consumer= */ new AvroRowConsumer() {
+          @Override
+          public void accept(GenericData.Record record) {
+            rowsAfterSecondSnapshot.add((Long) record.get("col"));
+          }
+        });
+    Collections.sort(rowsAfterSecondSnapshot);
+    assertEquals(Arrays.asList(1L, 2L), rowsAfterSecondSnapshot);
+  }
+
+  @Test
+  void testColumnPartitionedTableByDateField() throws InterruptedException, IOException {
+    String partitionedTableName =
+        "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s (num_field INT64, date_field DATE) "
+                + " PARTITION BY date_field "
+                + " OPTIONS( "
+                + " description=\"a table partitioned by date_field\" "
+                + " ) "
+                + "AS "
+                + " SELECT 1, CAST(\"2019-01-01\" AS DATE)"
+                + " UNION ALL"
+                + " SELECT 2, CAST(\"2019-01-02\" AS DATE)"
+                + " UNION ALL"
+                + " SELECT 3, CAST(\"2019-01-03\" AS DATE)",
+            DATASET, partitionedTableName);
+
+    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ partitionedTableName);
+
+    List<GenericData.Record> unfilteredRows = ReadAllRows(/* table= */ table, /* filter= */ null);
+    assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString());
+
+    List<GenericData.Record> partitionFilteredRows =
+        ReadAllRows(/* table= */ table, /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)");
+    assertEquals(
+        1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString());
+    assertEquals(2L, partitionFilteredRows.get(0).get("num_field"));
+  }
+
+  @Test
+  void testIngestionTimePartitionedTable() throws InterruptedException, IOException {
+    Field intFieldSchema =
+        Field.newBuilder("num_field", LegacySQLTypeName.INTEGER)
+            .setMode(Mode.REQUIRED)
+            .setDescription("IntegerDescription")
+            .build();
+    com.google.cloud.bigquery.Schema tableSchema =
+        com.google.cloud.bigquery.Schema.of(intFieldSchema);
+
+    TableId testTableId =
+        TableId.of(/* dataset= */ DATASET, /* table= */ "test_date_partitioned_table");
+    bigquery.create(
+        TableInfo.of(
+            testTableId,
+            StandardTableDefinition.newBuilder()
+                .setTimePartitioning(TimePartitioning.of(TimePartitioning.Type.DAY))
+                .setSchema(tableSchema)
+                .build()));
+
+    // Simulate ingestion for 2019-01-01.
+    RunQueryAppendJobAndExpectSuccess(
+        /* destinationTableId= */ TableId.of(
+            /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190101"),
+        /* query= */ "SELECT 1 AS num_field");
+
+    // Simulate ingestion for 2019-01-02.
+    RunQueryAppendJobAndExpectSuccess(
+        /* destinationTableId= */ TableId.of(
+            /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190102"),
+        /* query= */ "SELECT 2 AS num_field");
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ testTableId.getDataset(),
+            /* tableId= */ testTableId.getTable());
+
+    List<GenericData.Record> unfilteredRows = ReadAllRows(/* table= */ table, /* filter= */ null);
+    assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString());
+
+    List<GenericData.Record> partitionFilteredRows =
+        ReadAllRows(/* table= */ table, /* filter= */ "_PARTITIONDATE > \"2019-01-01\"");
+    assertEquals(
+        1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString());
+    assertEquals(2L, partitionFilteredRows.get(0).get("num_field"));
+  }
+
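+  // The "$20190101" suffix used above is a BigQuery partition decorator: writing to
+  // "table$YYYYMMDD" targets a single day's partition of an ingestion-time partitioned table,
+  // and the _PARTITIONDATE pseudo-column exposes that partition for filtering.
+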
+  @Test
+  void testBasicSqlTypes() throws InterruptedException, IOException {
+    String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s "
+                + " (int_field INT64 NOT NULL,"
+                + " num_field NUMERIC NOT NULL,"
+                + " float_field FLOAT64 NOT NULL,"
+                + " bool_field BOOL NOT NULL,"
+                + " str_field STRING NOT NULL,"
+                + " bytes_field BYTES NOT NULL) "
+                + " OPTIONS( "
+                + " description=\"a table with basic column types\" "
+                + " ) "
+                + "AS "
+                + " SELECT "
+                + " 17,"
+                + " CAST(1234.56 AS NUMERIC),"
+                + " 6.547678,"
+                + " TRUE,"
+                + " \"String field value\","
+                + " b\"абвгд\"",
+            DATASET, tableName);
+
+    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ tableName);
+
+    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage);
+    assertEquals(17L, (long) record.get("int_field"), rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.BYTES,
+        avroSchema.getField("num_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9),
+        avroSchema.getField("num_field").schema().getLogicalType(),
+        actualSchemaMessage);
+    BigDecimal actual_num_field =
+        new Conversions.DecimalConversion()
+            .fromBytes(
+                (ByteBuffer) record.get("num_field"),
+                avroSchema,
+                avroSchema.getField("num_field").schema().getLogicalType());
+    assertEquals(
+        BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9),
+        actual_num_field,
+        rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.DOUBLE,
+        avroSchema.getField("float_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        /* expected= */ 6.547678d,
+        /* actual= */ (double) record.get("float_field"),
+        /* delta= */ 0.0001,
+        rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.BOOLEAN,
+        avroSchema.getField("bool_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(true, record.get("bool_field"), rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.STRING,
+        avroSchema.getField("str_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.BYTES,
+        avroSchema.getField("bytes_field").schema().getType(),
+        actualSchemaMessage);
+    assertArrayEquals(
+        Utf8.getBytesFor("абвгд"),
+        ((ByteBuffer) (record.get("bytes_field"))).array(),
+        rowAssertMessage);
+  }
+
+  @Test
+  void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
+    String tableName =
+        "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s "
+                + " (date_field DATE NOT NULL,"
+                + " datetime_field DATETIME NOT NULL,"
+                + " time_field TIME NOT NULL,"
+                + " timestamp_field TIMESTAMP NOT NULL)"
+                + " OPTIONS( "
+                + " description=\"a table with date and time column types\" "
+                + " ) "
+                + "AS "
+                + " SELECT "
+                + " CAST(\"2019-05-31\" AS DATE),"
+                + " CAST(\"2019-04-30 21:47:59.999999\" AS DATETIME),"
+                + " CAST(\"21:47:59.999999\" AS TIME),"
+                + " CAST(\"2019-04-30 19:24:19.123456 UTC\" AS TIMESTAMP)",
+            DATASET, tableName);
+
+    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ tableName);
+
+    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage);
+    assertEquals(
+        LogicalTypes.date(),
+        avroSchema.getField("date_field").schema().getLogicalType(),
+        actualSchemaMessage);
+    assertEquals(
+        LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31),
+        LocalDate.ofEpochDay((int) record.get("date_field")),
+        rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.STRING,
+        avroSchema.getField("datetime_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        "datetime",
+        avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"),
+        actualSchemaMessage);
+    assertEquals(
+        new Utf8("2019-04-30T21:47:59.999999"),
+        (Utf8) record.get("datetime_field"),
+        rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.LONG,
+        avroSchema.getField("time_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        LogicalTypes.timeMicros(),
+        avroSchema.getField("time_field").schema().getLogicalType(),
+        actualSchemaMessage);
+    assertEquals(
+        LocalTime.of(
+            /* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000),
+        LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")),
+        rowAssertMessage);
+
+    assertEquals(
+        Schema.Type.LONG,
+        avroSchema.getField("timestamp_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        LogicalTypes.timestampMicros(),
+        avroSchema.getField("timestamp_field").schema().getLogicalType(),
+        actualSchemaMessage);
+    ZonedDateTime expected_timestamp =
+        ZonedDateTime.parse(
+                "2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC))
+            .withNano(123_456_000);
+    long actual_timestamp_micros = (long) record.get("timestamp_field");
+    ZonedDateTime actual_timestamp =
+        ZonedDateTime.ofInstant(
+            Instant.ofEpochSecond(
+                /* epochSecond= */ actual_timestamp_micros / 1_000_000,
+                (actual_timestamp_micros % 1_000_000) * 1_000),
+            ZoneOffset.UTC);
+    assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage);
+  }
+
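+  // Avro mappings asserted above, for reference: DATE -> int with the "date" logical type,
+  // TIME -> long with "time-micros", TIMESTAMP -> long with "timestamp-micros", and DATETIME
+  // -> string annotated with a custom "datetime" logicalType property.
+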
+  @Test
+  void testGeographySqlType() throws InterruptedException, IOException {
+    String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s "
+                + " (geo_field GEOGRAPHY NOT NULL)"
+                + " OPTIONS( "
+                + " description=\"a table with a geography column type\" "
+                + " ) "
+                + "AS "
+                + " SELECT ST_GEOGPOINT(1.1, 2.2)",
+            DATASET, tableName);
+
+    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ tableName);
+
+    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.STRING,
+        avroSchema.getField("geo_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        "GEOGRAPHY",
+        avroSchema.getField("geo_field").schema().getObjectProp("sqlType"),
+        actualSchemaMessage);
+    assertEquals(new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"), rowAssertMessage);
+  }
+
+  @Test
+  void testStructAndArraySqlTypes() throws InterruptedException, IOException {
+    String tableName =
+        "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8);
+    String createTableStatement =
+        String.format(
+            " CREATE TABLE %s.%s (array_field ARRAY<INT64>, struct_field STRUCT<int_field INT64,"
+                + " str_field STRING> NOT NULL) OPTIONS( description=\"a"
+                + " table with array and time column types\" ) AS SELECT [1, 2, 3], "
+                + " (10, 'abc')",
+            DATASET, tableName);
+
+    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ ServiceOptions.getDefaultProjectId(),
+            /* datasetId= */ DATASET,
+            /* tableId= */ tableName);
+
+    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
+
+    GenericData.Record record = rows.get(0);
+    Schema avroSchema = record.getSchema();
+
+    String actualSchemaMessage =
+        String.format(
+            "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
+    String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
+
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
+
+    assertEquals(
+        Schema.Type.ARRAY,
+        avroSchema.getField("array_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(
+        Schema.Type.LONG,
+        avroSchema.getField("array_field").schema().getElementType().getType(),
+        actualSchemaMessage);
+    assertArrayEquals(
+        new Long[] {1L, 2L, 3L},
+        ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]),
+        rowAssertMessage);
+
+    // Validate the STRUCT field and its members.
+ Schema structSchema = avroSchema.getField("struct_field").schema(); + assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage); + GenericData.Record structRecord = (GenericData.Record) record.get("struct_field"); + + assertEquals( + Schema.Type.LONG, + structSchema.getField("int_field").schema().getType(), + actualSchemaMessage); + assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage); + + assertEquals( + Schema.Type.STRING, + structSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage); + } + + @Test + void testUniverseDomainWithInvalidUniverseDomain() throws IOException { + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider( + FixedCredentialsProvider.create(loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN))) + .setUniverseDomain("invalid.domain") + .build(); + BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); + + String table = + BigQueryResource.FormatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); + localClient.close(); + } + + @Test + void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { + BigQueryReadSettings bigQueryReadSettings = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider( + FixedCredentialsProvider.create( + loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN))) + .setUniverseDomain("invalid.domain") + .build(); + BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); + + String table = + BigQueryResource.FormatTableResource( + /* projectId= */ "bigquery-public-data", + /* datasetId= */ "samples", + /* tableId= */ "shakespeare"); + + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); + localClient.close(); + } + + @Test + void testUniverseDomainWithMatchingDomain() throws IOException { + // Test a valid domain using the default credentials and Google default universe domain. 
+    BigQueryReadSettings bigQueryReadSettings =
+        BigQueryReadSettings.newBuilder().setUniverseDomain("googleapis.com").build();
+    BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings);
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ "bigquery-public-data",
+            /* datasetId= */ "samples",
+            /* tableId= */ "shakespeare");
+
+    ReadSession session =
+        localClient.createReadSession(
+            /* parent= */ parentProjectId,
+            /* readSession= */ ReadSession.newBuilder()
+                .setTable(table)
+                .setDataFormat(DataFormat.AVRO)
+                .build(),
+            /* maxStreamCount= */ 1);
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      rowCount += response.getRowCount();
+    }
+
+    assertEquals(164_656, rowCount);
+    localClient.close();
+  }
+
+  void testUniverseDomain() throws IOException {
+    // This test is not yet part of the presubmit integration tests, as it requires the
+    // apis-tpclp.goog universe domain credentials.
+    // Tests a valid read against the apis-tpclp.goog universe domain.
+    BigQueryReadSettings bigQueryReadSettings =
+        BigQueryReadSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build();
+    BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings);
+
+    String table =
+        BigQueryResource.FormatTableResource(
+            /* projectId= */ "google-tpc-testing-environment:cloudsdk-test-project",
+            /* datasetId= */ "tpc_demo_dataset",
+            /* tableId= */ "new_table");
+
+    ReadSession session =
+        localClient.createReadSession(
+            /* parent= */ parentProjectId,
+            /* readSession= */ ReadSession.newBuilder()
+                .setTable(table)
+                .setDataFormat(DataFormat.AVRO)
+                .build(),
+            /* maxStreamCount= */ 1);
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> stream = localClient.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      rowCount += response.getRowCount();
+    }
+
+    assertEquals(1, rowCount);
+    localClient.close();
+  }
+
+  /**
+   * Reads to the specified row offset within the stream. If the stream does not have the desired
+   * rows to read, it will read all of them.
+   *
+   * @param readStream the stream to read rows from
+   * @param rowOffset the target number of rows to skip
+   * @return the number of requested rows to skip or the total rows read if the stream had fewer
+   *     rows.
+   */
+  private long ReadStreamToOffset(ReadStream readStream, long rowOffset) {
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(readStream.getName()).build();
+
+    long rowCount = 0;
+    ServerStream<ReadRowsResponse> serverStream = client.readRowsCallable().call(readRowsRequest);
+    Iterator<ReadRowsResponse> responseIterator = serverStream.iterator();
+
+    while (responseIterator.hasNext()) {
+      ReadRowsResponse response = responseIterator.next();
+      rowCount += response.getRowCount();
+      if (rowCount >= rowOffset) {
+        return rowOffset;
+      }
+    }
+
+    return rowCount;
+  }
+
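+  // Millisecond snapshot times are converted to protobuf Timestamps in
+  // ProcessRowsAtSnapshot below; as a worked example, snapshotInMillis = 1_548_000_123
+  // becomes seconds = 1_548_000 and nanos = 123_000_000.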

+  /**
+   * Reads all the rows from the specified table.
+   *
+   * <p>For every row, the consumer is called for processing.
+   *
+   * @param table the table resource path to read from
+   * @param snapshotInMillis Optional. If specified, all rows up to the given timestamp will be
+   *     returned.
+   * @param filter Optional. If specified, it will be used to restrict returned data.
+   * @param consumer the consumer that receives all Avro rows.
+   * @throws IOException if the Avro rows cannot be decoded.
+   */
+  private void ProcessRowsAtSnapshot(
+      String table, Long snapshotInMillis, String filter, AvroRowConsumer consumer)
+      throws IOException {
+    Preconditions.checkNotNull(table);
+    Preconditions.checkNotNull(consumer);
+
+    CreateReadSessionRequest.Builder createSessionRequestBuilder =
+        CreateReadSessionRequest.newBuilder()
+            .setParent(parentProjectId)
+            .setMaxStreamCount(1)
+            .setReadSession(
+                ReadSession.newBuilder().setTable(table).setDataFormat(DataFormat.AVRO).build());
+
+    if (snapshotInMillis != null) {
+      Timestamp snapshotTimestamp =
+          Timestamp.newBuilder()
+              .setSeconds(snapshotInMillis / 1_000)
+              .setNanos((int) ((snapshotInMillis % 1_000) * 1_000_000))
+              .build();
+      createSessionRequestBuilder
+          .getReadSessionBuilder()
+          .setTableModifiers(
+              TableModifiers.newBuilder().setSnapshotTime(snapshotTimestamp).build());
+    }
+
+    if (filter != null && !filter.isEmpty()) {
+      createSessionRequestBuilder
+          .getReadSessionBuilder()
+          .setReadOptions(TableReadOptions.newBuilder().setRowRestriction(filter).build());
+    }
+
+    ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
+    assertEquals(
+        1,
+        session.getStreamsCount(),
+        String.format(
+            "Did not receive expected number of streams for table '%s' CreateReadSession"
+                + " response:%n%s",
+            table, session.toString()));
+
+    ReadRowsRequest readRowsRequest =
+        ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+
+    SimpleRowReader reader =
+        new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    for (ReadRowsResponse response : stream) {
+      reader.processRows(response.getAvroRows(), consumer);
+    }
+  }
+
+  /**
+   * Reads all the rows from the specified table and returns a list as generic Avro records.
+   *
+   * @param table the table resource path to read from
+   * @param filter Optional. If specified, it will be used to restrict returned data.
+   * @return the rows read, as generic Avro records.
+   */
+  List<GenericData.Record> ReadAllRows(String table, String filter) throws IOException {
+    final List<GenericData.Record> rows = new ArrayList<>();
+    ProcessRowsAtSnapshot(
+        /* table= */ table,
+        /* snapshotInMillis= */ null,
+        /* filter= */ filter,
+        new AvroRowConsumer() {
+          @Override
+          public void accept(GenericData.Record record) {
+            // clone the record since that reference will be reused by the reader.
+            rows.add(new GenericRecordBuilder(record).build());
+          }
+        });
+    return rows;
+  }
+
+  /**
+   * Runs a query job with WRITE_APPEND disposition to the destination table and returns the
+   * successfully completed job.
+   *
+   * @param destinationTableId the table the query results are appended to
+   * @param query the query to run
+   * @return the completed job
+   * @throws InterruptedException if interrupted while waiting for the job to complete
+   */
+  private Job RunQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query)
+      throws InterruptedException {
+    return RunQueryJobAndExpectSuccess(
+        QueryJobConfiguration.newBuilder(query)
+            .setDestinationTable(destinationTableId)
+            .setUseQueryCache(false)
+            .setUseLegacySql(false)
+            .setWriteDisposition(WriteDisposition.WRITE_APPEND)
+            .build());
+  }
+
+  /**
+   * Runs a query job with the provided configuration and returns the successfully completed job.
+   *
+   * @param configuration the query job configuration to run
+   * @return the completed job
+   * @throws InterruptedException if interrupted while waiting for the job to complete
+   */
+  private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration)
+      throws InterruptedException {
+    Job job = bigquery.create(JobInfo.of(configuration));
+    Job completedJob =
+        job.waitFor(
+            RetryOption.initialRetryDelayDuration(Duration.ofSeconds(1)),
+            RetryOption.totalTimeoutDuration(Duration.ofMinutes(1)));
+
+    assertNotNull(completedJob);
+    assertNull(
+        /* object= */ completedJob.getStatus().getError(),
+        /* message= */ "Received a job status that is not a success: "
+            + completedJob.getStatus().toString());
+
+    return completedJob;
+  }
+
+  static ServiceAccountCredentials loadCredentials(String credentialFile) {
+    try {
+      InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes());
+      return ServiceAccountCredentials.fromStream(keyStream);
+    } catch (IOException e) {
+      fail("Couldn't create fake JSON credentials.");
+    }
+    return null;
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java
new file mode 100644
index 000000000000..2ad7f0099b85
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1beta2.it;
+
+import com.google.cloud.bigquery.storage.v1beta2.AvroRows;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.io.BinaryDecoder;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.DecoderFactory;
+
+/*
+ * SimpleRowReader handles deserialization of the Avro-encoded row blocks transmitted
+ * from the storage API using a generic datum decoder.
+ */
+public class SimpleRowReader {
+
+  public interface AvroRowConsumer {
+
+    /**
+     * Handler for every new Avro row that is read.
+     *
+     * @param record the Avro generic record structure. Consumers should not rely on the reference
+     *     and should copy it if needed. The record reference is reused.
+     */
+    void accept(GenericData.Record record);
+  }
+
+  private final DatumReader<GenericData.Record> datumReader;
+
+  // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+  private BinaryDecoder decoder = null;
+
+  // Record object will be reused.
+  private GenericData.Record row = null;
+
+  public SimpleRowReader(Schema schema) {
+    Preconditions.checkNotNull(schema);
+    datumReader = new GenericDatumReader<>(schema);
+  }
+
+  /**
+   * Processes Avro rows by calling a consumer for each decoded row.
+   *
+   * @param avroRows the AvroRows object returned from the ReadRowsResponse.
+ * @param rowConsumer consumer that accepts GenericRecord. + */ + void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException { + Preconditions.checkNotNull(avroRows); + Preconditions.checkNotNull(rowConsumer); + decoder = + DecoderFactory.get() + .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), decoder); + + while (!decoder.isEnd()) { + row = datumReader.read(row, decoder); + rowConsumer.accept(row); + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java new file mode 100644 index 000000000000..2cc066a28e23 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.WatchdogProvider; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import java.time.Duration; +import java.util.Set; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class EnhancedBigQueryReadStubSettingsTest { + + @Test + void testSettingsArePreserved() { + String endpoint = "some.other.host:123"; + CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class); + Duration watchdogInterval = Duration.ofSeconds(12); + WatchdogProvider watchdogProvider = Mockito.mock(WatchdogProvider.class); + + EnhancedBigQueryReadStubSettings.Builder builder = + EnhancedBigQueryReadStubSettings.newBuilder() + .setEndpoint(endpoint) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogCheckIntervalDuration(watchdogInterval) + .setStreamWatchdogProvider(watchdogProvider); + + verifyBuilder(builder, endpoint, credentialsProvider, watchdogInterval, watchdogProvider); + + verifySettings( + builder.build(), endpoint, credentialsProvider, watchdogInterval, watchdogProvider); + + verifyBuilder( + 
builder.build().toBuilder(),
+        endpoint,
+        credentialsProvider,
+        watchdogInterval,
+        watchdogProvider);
+  }
+
+  private void verifyBuilder(
+      EnhancedBigQueryReadStubSettings.Builder builder,
+      String endpoint,
+      CredentialsProvider credentialsProvider,
+      Duration watchdogInterval,
+      WatchdogProvider watchdogProvider) {
+    assertThat(builder.getEndpoint()).isEqualTo(endpoint);
+    assertThat(builder.getCredentialsProvider()).isEqualTo(credentialsProvider);
+    assertThat(builder.getStreamWatchdogCheckIntervalDuration()).isEqualTo(watchdogInterval);
+    assertThat(builder.getStreamWatchdogProvider()).isEqualTo(watchdogProvider);
+
+    InstantiatingGrpcChannelProvider channelProvider =
+        (InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider();
+    assertThat(channelProvider.toBuilder().getMaxInboundMessageSize()).isEqualTo(Integer.MAX_VALUE);
+  }
+
+  private void verifySettings(
+      EnhancedBigQueryReadStubSettings settings,
+      String endpoint,
+      CredentialsProvider credentialsProvider,
+      Duration watchdogInterval,
+      WatchdogProvider watchdogProvider) {
+    assertThat(settings.getEndpoint()).isEqualTo(endpoint);
+    assertThat(settings.getCredentialsProvider()).isEqualTo(credentialsProvider);
+    assertThat(settings.getStreamWatchdogCheckIntervalDuration()).isEqualTo(watchdogInterval);
+    assertThat(settings.getStreamWatchdogProvider()).isEqualTo(watchdogProvider);
+
+    InstantiatingGrpcChannelProvider channelProvider =
+        (InstantiatingGrpcChannelProvider) settings.getTransportChannelProvider();
+    assertThat(channelProvider.toBuilder().getMaxInboundMessageSize()).isEqualTo(Integer.MAX_VALUE);
+  }
+
+  @Test
+  void testCreateReadSessionSettings() {
+    UnaryCallSettings.Builder<CreateReadSessionRequest, ReadSession> builder =
+        EnhancedBigQueryReadStubSettings.newBuilder().createReadSessionSettings();
+    verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
+  }
+
+  @Test
+  void testReadRowsSettings() {
+    ServerStreamingCallSettings.Builder<ReadRowsRequest, ReadRowsResponse> builder =
+        EnhancedBigQueryReadStubSettings.newBuilder().readRowsSettings();
+    assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE);
+    RetrySettings retrySettings = builder.getRetrySettings();
+    assertThat(retrySettings.getInitialRetryDelayDuration()).isEqualTo(Duration.ofMillis(100L));
+    assertThat(retrySettings.getRetryDelayMultiplier()).isWithin(1e-6).of(1.3);
+    assertThat(retrySettings.getMaxRetryDelayDuration()).isEqualTo(Duration.ofMinutes(1L));
+    assertThat(retrySettings.getInitialRpcTimeoutDuration()).isEqualTo(Duration.ofDays(1L));
+    assertThat(retrySettings.getRpcTimeoutMultiplier()).isWithin(1e-6).of(1.0);
+    assertThat(retrySettings.getMaxRpcTimeoutDuration()).isEqualTo(Duration.ofDays(1L));
+    assertThat(retrySettings.getTotalTimeoutDuration()).isEqualTo(Duration.ofDays(1L));
+    assertThat(builder.getIdleTimeoutDuration()).isEqualTo(Duration.ZERO);
+  }
+
+  @Test
+  void testSplitReadStreamSettings() {
+    UnaryCallSettings.Builder<SplitReadStreamRequest, SplitReadStreamResponse> builder =
+        EnhancedBigQueryReadStubSettings.newBuilder().splitReadStreamSettings();
+    verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
+  }
+
+  private void verifyRetrySettings(Set<Code> retryCodes, RetrySettings retrySettings) {
+    assertThat(retryCodes).contains(Code.UNAVAILABLE);
+    assertThat(retrySettings.getTotalTimeoutDuration()).isGreaterThan(Duration.ZERO);
+    assertThat(retrySettings.getInitialRetryDelayDuration()).isGreaterThan(Duration.ZERO);
+    assertThat(retrySettings.getRetryDelayMultiplier()).isAtLeast(1.0);
+    assertThat(retrySettings.getMaxRetryDelayDuration()).isGreaterThan(Duration.ZERO);
+    assertThat(retrySettings.getInitialRpcTimeoutDuration()).isGreaterThan(Duration.ZERO);
+    assertThat(retrySettings.getRpcTimeoutMultiplier()).isAtLeast(1.0);
+    assertThat(retrySettings.getMaxRpcTimeoutDuration()).isGreaterThan(Duration.ZERO);
+  }
+}
diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java
new file mode 100644
index 000000000000..a3eeb9e43fc6
--- /dev/null
+++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigquery.storage.v1beta2.stub;
+
+import static com.google.common.truth.Truth.assertWithMessage;
+
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.api.gax.grpc.testing.InProcessServer;
+import com.google.api.gax.grpc.testing.LocalChannelProvider;
+import com.google.api.gax.rpc.FixedHeaderProvider;
+import com.google.api.gax.rpc.UnimplementedException;
+import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadGrpc.BigQueryReadImplBase;
+import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadSettings;
+import com.google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient;
+import com.google.cloud.bigquery.storage.v1beta2.BigQueryWriteSettings;
+import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1beta2.ReadSession;
+import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest;
+import com.google.cloud.bigquery.storage.v1beta2.WriteStream;
+import java.util.regex.Pattern;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class ResourceHeaderTest {
+
+  private static final String TEST_TABLE_REFERENCE =
+      "projects/project/datasets/dataset/tables/table";
+
+  private static final String WRITE_STREAM_NAME =
+      "projects/project/datasets/dataset/tables/table/streams/stream";
+
+  private static final String TEST_STREAM_NAME = "streamName";
+
+  private static final String NAME = "resource-header-test:123-v1beta2";
+
+  private static final String HEADER_NAME = "x-goog-request-params";
+
+  private static final Pattern READ_SESSION_NAME_PATTERN =
+      Pattern.compile(
+          ".*"
+              + "read_session\\.table=projects%2Fproject%2Fdatasets%2Fdataset%2Ftables%2Ftable"
+              + ".*");
+
+  private static final Pattern PARENT_PATTERN =
+      Pattern.compile(
+          ".*" + "parent=projects%2Fproject%2Fdatasets%2Fdataset%2Ftables%2Ftable" + ".*");
+
+  private static final Pattern NAME_PATTERN =
+      Pattern.compile(
+          ".*"
"name=projects%2Fproject%2Fdatasets%2Fdataset%2Ftables%2Ftable%2Fstreams%2Fstream" + + ".*"); + + private static final Pattern WRITE_STREAM_PATTERN = + Pattern.compile( + ".*write_stream=projects%2Fproject%2Fdatasets%2Fdataset%2Ftables%2Ftable%2Fstreams%2Fstream" + + ".*"); + + private static final Pattern READ_STREAM_PATTERN = + Pattern.compile(".*" + "read_stream=streamName" + ".*"); + private static final Pattern STREAM_NAME_PATTERN = + Pattern.compile(".*" + "name=streamName" + ".*"); + + private static final String TEST_HEADER_NAME = "simple-header-name"; + private static final String TEST_HEADER_VALUE = "simple-header-value"; + private static final Pattern TEST_PATTERN = Pattern.compile(".*" + TEST_HEADER_VALUE + ".*"); + + private static InProcessServer server; + + private LocalChannelProvider channelProvider; + private LocalChannelProvider channelProvider2; + private BigQueryReadClient client; + private BigQueryWriteClient writeClient; + + @BeforeAll + public static void setUpClass() throws Exception { + server = new InProcessServer<>(new BigQueryReadImplBase() {}, NAME); + server.start(); + } + + @BeforeEach + void setUp() throws Exception { + channelProvider = LocalChannelProvider.create(NAME); + BigQueryReadSettings.Builder settingsBuilder = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setHeaderProvider(FixedHeaderProvider.create(TEST_HEADER_NAME, TEST_HEADER_VALUE)) + .setTransportChannelProvider(channelProvider); + client = BigQueryReadClient.create(settingsBuilder.build()); + channelProvider2 = LocalChannelProvider.create(NAME); + BigQueryWriteSettings.Builder writeSettingsBuilder = + BigQueryWriteSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setTransportChannelProvider(channelProvider2); + writeClient = BigQueryWriteClient.create(writeSettingsBuilder.build()); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + } + + @AfterAll + static void tearDownClass() throws Exception { + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void createReadSessionTest() { + try { + client.createReadSession( + "parents/project", ReadSession.newBuilder().setTable(TEST_TABLE_REFERENCE).build(), 1); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + verifyHeaderSent(READ_SESSION_NAME_PATTERN); + } + + @Test + void readRowsTest() { + try { + ReadRowsRequest request = + ReadRowsRequest.newBuilder().setReadStream(TEST_STREAM_NAME).setOffset(125).build(); + client.readRowsCallable().call(request); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(READ_STREAM_PATTERN); + } + + @Test + void splitReadStreamTest() { + try { + client.splitReadStream(SplitReadStreamRequest.newBuilder().setName(TEST_STREAM_NAME).build()); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + + verifyHeaderSent(STREAM_NAME_PATTERN); + } + + @Test + void createWriteStreamTest() { + try { + writeClient.createWriteStream( + "projects/project/datasets/dataset/tables/table", + WriteStream.newBuilder().setType(WriteStream.Type.BUFFERED).build()); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. 
+ } + boolean headerSent = channelProvider2.isHeaderSent(HEADER_NAME, PARENT_PATTERN); + assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + } + + @Test + void getWriteStreamTest() { + try { + writeClient.getWriteStream(WRITE_STREAM_NAME); + } catch (UnimplementedException e) { + // Ignore the error: none of the methods are actually implemented. + } + boolean headerSent = channelProvider2.isHeaderSent(HEADER_NAME, NAME_PATTERN); + assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + } + + // Following tests will work after b/185842996 is fixed. + // @Test + // void appendRowsTest() { + // try { + // AppendRowsRequest req = + // AppendRowsRequest.newBuilder().setWriteStream(WRITE_STREAM_NAME).build(); + // BidiStream bidiStream = + // writeClient.appendRowsCallable().call(); + // bidiStream.send(req); + // } catch (UnimplementedException e) { + // // Ignore the error: none of the methods are actually implemented. + // } + // boolean headerSent = channelProvider2.isHeaderSent(HEADER_NAME, WRITE_STREAM_PATTERN); + // assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + // } + // + // @Test + // void appendRowsManualTest() { + // try { + // StreamWriterV2 streamWriter = + // StreamWriterV2.newBuilder(WRITE_STREAM_NAME, writeClient) + // .setWriterSchema(ProtoSchema.newBuilder().build()) + // .build(); + // streamWriter.append(ProtoRows.newBuilder().build(), 1); + // } catch (UnimplementedException e) { + // // Ignore the error: none of the methods are actually implemented. + // } catch (IOException e) { + // // Ignore the error: none of the methods are actually implemented. + // } + // boolean headerSent = channelProvider2.isHeaderSent(HEADER_NAME, WRITE_STREAM_PATTERN); + // assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + // } + + private void verifyHeaderSent(Pattern... patterns) { + for (Pattern pattern : patterns) { + boolean headerSent = channelProvider.isHeaderSent(HEADER_NAME, pattern); + assertWithMessage("Generated header was sent").that(headerSent).isTrue(); + } + boolean testHeaderSent = channelProvider.isHeaderSent(TEST_HEADER_NAME, TEST_PATTERN); + assertWithMessage("Provided header was sent").that(testHeaderSent).isTrue(); + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java new file mode 100644 index 000000000000..4b6fe018ebcc --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java @@ -0,0 +1,241 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadGrpc.BigQueryReadImplBase; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadSettings; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.common.collect.Queues; +import io.grpc.Status.Code; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.List; +import java.util.Queue; +import java.util.UUID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class ReadRowsRetryTest { + + private TestBigQueryStorageService service; + private BigQueryReadClient client; + private InProcessServer server; + private LocalChannelProvider channelProvider; + + @BeforeEach + void setUp() throws Exception { + service = new TestBigQueryStorageService(); + String serverName = UUID.randomUUID().toString(); + server = new InProcessServer<>(service, serverName); + server.start(); + channelProvider = LocalChannelProvider.create(serverName); + + BigQueryReadSettings settings = + BigQueryReadSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setTransportChannelProvider(channelProvider) + .build(); + + client = BigQueryReadClient.create(settings); + } + + @AfterEach + void tearDown() throws Exception { + client.close(); + server.stop(); + server.blockUntilShutdown(); + } + + @Test + void happyPathTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7)); + + assertEquals(17, getRowCount(request)); + } + + @Test + void immediateRetryTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7)); + + assertEquals(17, getRowCount(request)); + } + + @Test + void multipleRetryTestWithZeroInitialOffset() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(5) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 5) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6)); + + assertEquals(28, getRowCount(request)); + } + + @Test + void multipleRetryTestWithNonZeroInitialOffset() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17); + service.expectations.add( + 
RpcExpectation.create() + .expectRequest("fake-stream", 17) + .respondWithNumberOfRows(5) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 22) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3)); + + assertEquals(25, getRowCount(request)); + } + + @Test + void errorAtTheVeryEndTest() { + ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); + service.expectations.add( + RpcExpectation.create() + .expectRequest("fake-stream", 0) + .respondWithNumberOfRows(10) + .respondWithNumberOfRows(7) + .respondWithStatus(Code.UNAVAILABLE)); + + service.expectations.add( + RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0)); + + assertEquals(17, getRowCount(request)); + } + + private int getRowCount(ReadRowsRequest request) { + ServerStream serverStream = client.readRowsCallable().call(request); + int rowCount = 0; + for (ReadRowsResponse readRowsResponse : serverStream) { + rowCount += readRowsResponse.getRowCount(); + } + return rowCount; + } + + private static class TestBigQueryStorageService extends BigQueryReadImplBase { + + Queue expectations = Queues.newArrayDeque(); + int currentRequestIndex = -1; + + @Override + public void readRows( + ReadRowsRequest request, StreamObserver responseObserver) { + + RpcExpectation expectedRpc = expectations.poll(); + currentRequestIndex++; + + assertNotNull( + expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString()); + assertEquals( + expectedRpc.expectedRequest, + request, + "Expected request #" + + currentRequestIndex + + " does not match actual request: " + + request.toString()); + for (ReadRowsResponse response : expectedRpc.responses) { + responseObserver.onNext(response); + } + + if (expectedRpc.statusCode.toStatus().isOk()) { + responseObserver.onCompleted(); + } else { + responseObserver.onError(expectedRpc.statusCode.toStatus().asRuntimeException()); + } + } + } + + private static class RpcExpectation { + + ReadRowsRequest expectedRequest; + Code statusCode; + List responses; + + private RpcExpectation() { + statusCode = Code.OK; + responses = new ArrayList<>(); + } + + static RpcExpectation create() { + return new RpcExpectation(); + } + + static ReadRowsRequest createRequest(String streamName, long offset) { + return ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(offset).build(); + } + + static ReadRowsResponse createResponse(int numberOfRows) { + return ReadRowsResponse.newBuilder().setRowCount(numberOfRows).build(); + } + + RpcExpectation expectRequest(String streamName, long offset) { + expectedRequest = createRequest(streamName, offset); + return this; + } + + RpcExpectation respondWithNumberOfRows(int numberOfRows) { + responses.add(createResponse(numberOfRows)); + return this; + } + + RpcExpectation respondWithStatus(Code code) { + this.statusCode = code; + return this; + } + } +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto new file mode 100644 index 000000000000..d878f7bdc953 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto @@ -0,0 +1,265 @@ +syntax = "proto2"; + +package com.google.cloud.bigquery.storage.test; + +message 
ComplexRoot { + optional int64 test_int = 1; + repeated string test_string = 2; + required bytes test_bytes = 3; + optional bool test_bool = 4; + repeated double test_double = 5; + required int32 test_date = 6; + optional int64 test_datetime = 7; + repeated int64 test_datetime_str = 8; + required ComplexLvl1 complex_lvl1 = 9; + required + ComplexLvl2 complex_lvl2 = 10; + optional bytes test_numeric = 11; + optional string test_geo = 12; + optional int64 test_timestamp = 13; + optional int64 test_time = 14; + optional int64 test_time_str = 15; + repeated bytes test_numeric_repeated = 16; + optional bytes test_numeric_str = 17; + optional bytes test_numeric_short = 18; + optional bytes test_numeric_int = 19; + optional bytes test_numeric_long = 20; + optional bytes test_numeric_float = 21; + optional bytes test_numeric_double = 22; + optional bytes test_bignumeric = 23; + repeated bytes test_bignumeric_str = 24; + optional bytes test_bignumeric_short = 25; + optional bytes test_bignumeric_int = 26; + optional bytes test_bignumeric_long = 27; + optional bytes test_bignumeric_float = 28; + optional bytes test_bignumeric_double = 29; + optional string test_interval = 30; + repeated string test_json = 31; + optional string test_timestamp_higher_precision = 32; + repeated string test_timestamp_higher_precision_repeated = 33; +} + +message CasingComplex { + optional int64 test_int = 1; + repeated string test_string = 2; + required bytes test_bytes = 3; + optional bool test_bool = 4; + repeated double test_double = 5; + required int32 test_date = 6; + required OptionTest option_test = 7; +} + +message ComplexLvl1 { + optional int64 test_int = 1; + required ComplexLvl2 complex_lvl2 = 2; +} + +message ComplexLvl2 { + optional int64 test_int = 1; +} + +message ObjectType { + optional ComplexLvl2 test_field_type = 1; +} + +message RepeatedType { + repeated int64 test_field_type = 1; +} + +message OptionTest { + optional int64 test_optional = 1; + required int64 test_required = 2; + repeated int64 test_repeated = 3; +} + +message ReuseRoot { + optional ReuseLvl1 reuse_lvl1 = 1; + optional ReuseLvl1 reuse_lvl1_1 = 2; + optional ReuseLvl1 reuse_lvl1_2 = 3; +} + +message ReuseLvl1 { + optional int64 test_int = 1; + optional ReuseLvl2 reuse_lvl2 = 2; +} + +message ReuseLvl2 { + optional int64 test_int = 1; +} + +message RepeatedInt64 { + repeated int64 test_repeated = 1; +} + +message RepeatedInt32 { + repeated int32 test_repeated = 1; + optional int32 test_non_repeated = 2; +} + +message RepeatedDouble { + repeated double test_repeated = 1; +} + +message RepeatedString { + repeated string test_repeated = 1; +} + +message RepeatedBool { + repeated bool test_repeated = 1; +} + +message RepeatedBytes { + repeated bytes test_repeated = 1; +} + +message RepeatedObject { + repeated ComplexLvl2 test_repeated = 1; +} + +message TestBool { + optional bool bool = 1; + optional bool lowercase = 2; + optional bool uppercase = 3; +} + +message TestInt64 { + optional int64 byte = 1; + optional int64 short = 2; + optional int64 int = 3; + optional int64 long = 4; + optional int64 string = 5; +} + +message TestInt32 { + optional int32 byte = 1; + optional int32 short = 2; + optional int32 int = 3; + optional int32 string = 4; +} + +message TestDouble { + optional double double = 1; + optional double float = 2; + optional double byte = 3; + optional double short = 4; + optional double int = 5; + optional double long = 6; + optional double string = 7; +} + +message TestTimestamp { + optional int64 test_string = 1; + 
optional int64 test_string_t_z = 2; + optional int64 test_long = 3; + optional int64 test_int = 4; + optional int64 test_float = 5; + optional int64 test_offset = 6; + optional int64 test_zero_offset = 7; + optional int64 test_timezone = 8; + optional int64 test_saformat = 9; +} + +message TestTimestampHigherPrecision { + optional string test_string = 1; + optional string test_string_t_z = 2; + optional string test_long = 3; + optional string test_int = 4; + optional string test_float = 5; + optional string test_offset = 6; + optional string test_zero_offset = 7; + optional string test_timezone = 8; + optional string test_saformat = 9; +} + +message TestRepeatedTimestamp { + repeated int64 test_string_repeated = 1; + repeated int64 test_string_t_z_repeated = 2; + repeated int64 test_long_repeated = 3; + repeated int64 test_int_repeated = 4; + repeated int64 test_float_repeated = 5; + repeated int64 test_offset_repeated = 6; + repeated int64 test_zero_offset_repeated = 7; + repeated int64 test_timezone_repeated = 8; + repeated int64 test_saformat_repeated = 9; +} + +message TestRepeatedTimestampHigherPrecision { + repeated string test_string_repeated = 1; + repeated string test_string_t_z_repeated = 2; + repeated string test_long_repeated = 3; + repeated string test_int_repeated = 4; + repeated string test_float_repeated = 5; + repeated string test_offset_repeated = 6; + repeated string test_zero_offset_repeated = 7; + repeated string test_timezone_repeated = 8; + repeated string test_saformat_repeated = 9; +} + +message TestDate { + optional int32 test_string = 1; + optional int32 test_long = 2; +} + +message NestedRepeated { + repeated int64 int = 1; + repeated double double = 2; + optional RepeatedString repeated_string = 3; +} + +message TestRequired { + optional double optional_double = 1; + required double required_double = 2; +} + +message TestRepeatedIsOptional { + optional double required_double = 1; + repeated double repeated_double = 2; +} + +message TopLevelMismatch { + optional double mismatch_double = 1; +} + +message TestDatetime { + optional int64 datetime = 1; +} + +message TestTime { + repeated int64 time = 1; +} + +message TestNumeric { + optional bytes numeric = 1; +} + +message TestBignumeric { + repeated bytes bignumeric = 1; +} + +message TestMixedCaseFieldNames { + required string foobar = 1; +} + +message TestRange { + optional TestRangeDate range_date = 1; + optional TestRangeDatetime range_datetime = 2; + optional TestRangeTimestamp range_timestamp = 3; + optional TestRangeDate range_date_mixed_case = 4; + optional TestRangeDatetime range_datetime_mixed_case = 5; + optional TestRangeTimestamp range_timestamp_mixed_case = 6; +} + +message TestRangeDate { + optional int32 start = 1; + optional int32 end = 2; +} + +message TestRangeDatetime { + optional int64 start = 1; + optional int64 end = 2; +} + +message TestRangeTimestamp { + optional int64 start = 1; + optional int64 end = 2; +} diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/optionalTest.proto b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/optionalTest.proto new file mode 100644 index 000000000000..41e13f05a382 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/optionalTest.proto @@ -0,0 +1,26 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto3"; + +package com.google.cloud.bigquery.storage.test; + +option java_package = "com.google.cloud.bigquery.storage.test"; +option java_outer_classname = "TestOptional"; + +message FooOptionalType { + optional string foo = 1; +} + diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/schemaTest.proto b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/schemaTest.proto new file mode 100644 index 000000000000..1d43e094cda6 --- /dev/null +++ b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/schemaTest.proto @@ -0,0 +1,273 @@ +syntax = "proto2"; + +package com.google.cloud.bigquery.storage.test; + +import "google/cloud/bigquery/storage/v1/annotations.proto"; + +message SupportedTypes { + optional int32 int32_value = 1; + optional int64 int64_value = 2; + optional uint32 uint32_value = 3; + optional uint64 uint64_value = 4; + optional fixed32 fixed32_value = 7; + optional fixed64 fixed64_value = 8; + optional sfixed32 sfixed32_value = 9; + optional sfixed64 sfixed64_value = 10; + optional float float_value = 11; + optional double double_value = 12; + optional bool bool_value = 13; + optional bytes bytes_value = 14; + optional string string_value = 15; +} + +message NonSupportedTypes { + optional sint32 sint32_value = 5; + optional sint64 sint64_value = 6; +} + +message Int32Type { + optional int32 test_field_type = 1; +} + +message Int64Type { + optional int64 test_field_type = 1; +} + +message UInt32Type { + optional uint32 test_field_type = 1; +} + +message UInt64Type { + optional uint64 test_field_type = 1; +} + +message Fixed32Type { + optional fixed32 test_field_type = 1; +} + +message Fixed64Type { + optional fixed64 test_field_type = 1; +} + +message SFixed32Type { + optional sfixed32 test_field_type = 1; +} + +message SFixed64Type { + optional sfixed64 test_field_type = 1; +} + +message FloatType { + optional float test_field_type = 1; +} + +message DoubleType { + optional double test_field_type = 1; +} + +message BoolType { + optional bool test_field_type = 1; +} + +message BytesType { + optional bytes test_field_type = 1; +} + +message StringType { + optional string test_field_type = 1; +} + +message EnumType { + enum EnumTest { + test1 = 0; + } + optional EnumTest test_field_type = 1; +} + +message MessageType { + optional StringType test_field_type = 1; +} + +message GroupType { + optional group Test_field_type = 1 { + optional string test_field_type = 2; + } +} + +message MessageTypeMismatch { + optional MismatchLvl0 mismatchlvl0 = 1; +} + +message MismatchLvl0 { + optional MismatchLvl1 mismatchlvl1 = 1; +} + +message MismatchLvl1 { + optional string test_field_type = 1; +} + +message TopLevelMatch { + optional string match = 1; + optional MismatchLvl1 mismatch = 2; +} + +message ProtoRepeatedBQRepeated { + repeated int32 repeated_mode = 1; +} + +message ProtoOptionalBQRepeated { + optional int32 repeated_mode = 1; +} + +message ProtoRequiredBQRepeated { + required int32 repeated_mode = 1; +} + +message ProtoRequiredBQRequired { + required int32 required_mode = 1; +} + +message 
ProtoNoneBQRequired { + optional int32 no_required_mode = 1; +} + +message ProtoOptionalBQRequired { + optional int32 required_mode = 1; +} + +message ProtoRepeatedBQRequired { + repeated int32 required_mode = 1; +} + +message ProtoOptionalBQOptional { + optional int32 optional_mode = 1; +} + +message ProtoRequiredBQOptional{ + required int32 optional_mode = 1; +} + +message ProtoRepeatedBQOptional { + repeated int32 optional_mode = 1; +} + +message ProtoCompatibleWithBQInt { + optional int32 optional_mode = 1; +} + +message SupportedNestingLvl1 { + optional int32 int_value = 1; + optional SupportedNestingLvl2 nesting_value = 2; +} + +message SupportedNestingLvl2 { + optional int32 int_value = 1; +} + +message SupportedNestingStacked { + optional int32 test_int = 1; + optional SupportedNestingLvl2 nesting_value1 = 2; + optional SupportedNestingLvl2 nesting_value2 = 3; +} + +message NonSupportedMap { + map map_value = 1; +} + +message NonSupportedNestingRecursive { + optional NonSupportedNestingRecursive nesting_value = 2; +} + +message NonSupportedNestingContainsRecursive { + optional int32 int_value = 1; + optional NonSupportedNestingRecursive nesting_value = 2; +} + +message NonSupportedNestingLvl0 { + optional NonSupportedNestingLvl1 test1 = 1; +} + +message NonSupportedNestingLvl1 { + optional NonSupportedNestingLvl2 test1 = 1; +} + +message NonSupportedNestingLvl2 { + optional NonSupportedNestingLvl3 test1 = 1; +} + +message NonSupportedNestingLvl3 { + optional NonSupportedNestingLvl4 test1 = 1; +} + +message NonSupportedNestingLvl4 { + optional NonSupportedNestingLvl5 test1 = 1; +} + +message NonSupportedNestingLvl5 { + optional NonSupportedNestingLvl6 test1 = 1; +} + +message NonSupportedNestingLvl6 { + optional NonSupportedNestingLvl7 test1 = 1; +} + +message NonSupportedNestingLvl7 { + optional NonSupportedNestingLvl8 test1 = 1; +} + +message NonSupportedNestingLvl8 { + optional NonSupportedNestingLvl9 test1 = 1; +} + +message NonSupportedNestingLvl9 { + optional NonSupportedNestingLvl10 test1 = 1; +} + +message NonSupportedNestingLvl10 { + optional NonSupportedNestingLvl11 test1 = 1; +} + +message NonSupportedNestingLvl11 { + optional NonSupportedNestingLvl12 test1 = 1; +} + +message NonSupportedNestingLvl12 { + optional NonSupportedNestingLvl13 test1 = 1; +} + +message NonSupportedNestingLvl13 { + optional NonSupportedNestingLvl14 test1 = 1; +} + +message NonSupportedNestingLvl14 { + optional NonSupportedNestingLvl15 test1 = 1; +} + +message NonSupportedNestingLvl15 { + optional NonSupportedNestingLvl16 test1 = 1; +} + +message NonSupportedNestingLvl16 { + optional int32 test1 = 1; +} + +message AllowUnknownUnsupportedFields { + optional NonSupportedMap map_value = 1; + optional string string_value = 2; +} + +message FakeFooType { + optional int32 foo = 1; +} + +message TestNestedFlexibleFieldName { + optional string col_c3RyLeWIlw = 1 + [(.google.cloud.bigquery.storage.v1.column_name) = "str-列"]; + optional FlexibleNameField col_bmVzdGVkLeWIlw = 2 + [(.google.cloud.bigquery.storage.v1.column_name) = "nested-列"]; +} + +message FlexibleNameField { + optional int64 col_aW50LeWIlw = 1 + [(.google.cloud.bigquery.storage.v1.column_name) = "int-列"]; +} \ No newline at end of file diff --git a/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/test.proto b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/test.proto new file mode 100644 index 000000000000..8b77a4eb4764 --- /dev/null +++ 
b/java-bigquerystorage/google-cloud-bigquerystorage/src/test/proto/test.proto @@ -0,0 +1,110 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; + +package com.google.cloud.bigquery.storage.test; + +import "google/cloud/bigquery/storage/v1/annotations.proto"; +import "google/protobuf/timestamp.proto"; + +enum TestEnum { + TestEnum0 = 0; + TestEnum1 = 1; +} + +message AllSupportedTypes { + optional int32 int32_value = 1; + optional int64 int64_value = 2; + optional uint32 uint32_value = 3; + optional uint64 uint64_value = 4; + optional float float_value = 5; + optional double double_value = 6; + optional bool bool_value = 7; + optional TestEnum enum_value = 8; + required string string_value = 9; +} + +message InnerType { + repeated string value = 1; +} + +message NestedType { + repeated InnerType inner_type = 1; +} + +message ComplicateType { + repeated NestedType nested_repeated_type = 1; + optional InnerType inner_type = 2; +} + +message SimpleTypeForDefaultValue { + optional string foo_with_default = 1; + optional string bar_without_default = 2; + optional string date_with_default_to_current = 3; +} + +message ContainsRecursive { + optional RecursiveType field = 1; +} + +message RecursiveType { + optional ContainsRecursive field = 2; +} + +message RecursiveTypeTopMessage { + optional RecursiveTypeTopMessage field = 2; +} + +message FooType { + optional string foo = 1; +} + +message UpdatedFooType { + optional string foo = 1; + optional string bar = 2; +} + +message UpdatedFooType2 { + optional string foo = 1; + optional string bar = 2; + optional string baz = 3; +} + +message FooTimestampType { + optional string foo = 1; + optional .google.protobuf.Timestamp bar = 2; +} + +message DuplicateType { + optional TestEnum f1 = 1; + optional TestEnum f2 = 2; + optional ComplicateType f3 = 3; + optional ComplicateType f4 = 4; +} + +message FlexibleType { + optional string col_dGVzdC3liJc = 1 + [(.google.cloud.bigquery.storage.v1.column_name) = "test-列"]; +} + +message RepetitionType { + required bytes a = 1; + required bytes b = 2; + required bytes c = 3; + repeated bytes aa = 4; + repeated bytes bb = 5; + repeated bytes cc = 6; +} \ No newline at end of file diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/pom.xml b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/pom.xml new file mode 100644 index 000000000000..80fbbd97f424 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/pom.xml @@ -0,0 +1,50 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1 + 3.19.1 + grpc-google-cloud-bigquerystorage-v1 + GRPC library for grpc-google-cloud-bigquerystorage-v1 + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1 + + + com.google.guava + guava + + + + + + + 
org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java new file mode 100644 index 000000000000..9fccc968a4b1 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java @@ -0,0 +1,828 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * <pre>
+ * BigQuery Read API.
+ * The Read API can be used to read data from BigQuery.
+ * </pre>
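+ *
+ * <p>Editorial sketch (hypothetical, for illustration; assumes the caller has already built an
+ * {@code io.grpc.ManagedChannel} named {@code channel}): the generated entry points below can
+ * be combined as follows.
+ * <pre>
+ * // Async stub: callback-style unary and server-streaming calls.
+ * BigQueryReadGrpc.BigQueryReadStub asyncStub = BigQueryReadGrpc.newStub(channel);
+ * // Blocking stub: simple synchronous calls.
+ * BigQueryReadGrpc.BigQueryReadBlockingStub blockingStub = BigQueryReadGrpc.newBlockingStub(channel);
+ * // Future stub: ListenableFuture-style unary calls.
+ * BigQueryReadGrpc.BigQueryReadFutureStub futureStub = BigQueryReadGrpc.newFutureStub(channel);
+ * </pre>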
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class BigQueryReadGrpc { + + private BigQueryReadGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1.BigQueryRead"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1.ReadSession> + getCreateReadSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateReadSession", + requestType = com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.ReadSession.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1.ReadSession> + getCreateReadSessionMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1.ReadSession> + getCreateReadSessionMethod; + if ((getCreateReadSessionMethod = BigQueryReadGrpc.getCreateReadSessionMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getCreateReadSessionMethod = BigQueryReadGrpc.getCreateReadSessionMethod) == null) { + BigQueryReadGrpc.getCreateReadSessionMethod = + getCreateReadSessionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateReadSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.ReadSession + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryReadMethodDescriptorSupplier("CreateReadSession")) + .build(); + } + } + } + return getCreateReadSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1.ReadRowsResponse> + getReadRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ReadRows", + requestType = com.google.cloud.bigquery.storage.v1.ReadRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.ReadRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1.ReadRowsResponse> + getReadRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1.ReadRowsResponse> + getReadRowsMethod; + if ((getReadRowsMethod = BigQueryReadGrpc.getReadRowsMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getReadRowsMethod = BigQueryReadGrpc.getReadRowsMethod) == null) { + BigQueryReadGrpc.getReadRowsMethod = + getReadRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ReadRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.ReadRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.ReadRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryReadMethodDescriptorSupplier("ReadRows")) + .build(); + } + } + } + return getReadRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse> + getSplitReadStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SplitReadStream", + requestType = com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse> + getSplitReadStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse> + getSplitReadStreamMethod; + if ((getSplitReadStreamMethod = BigQueryReadGrpc.getSplitReadStreamMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getSplitReadStreamMethod = BigQueryReadGrpc.getSplitReadStreamMethod) == null) { + BigQueryReadGrpc.getSplitReadStreamMethod = + getSplitReadStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SplitReadStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryReadMethodDescriptorSupplier("SplitReadStream")) + .build(); + } + } + } + return getSplitReadStreamMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryReadStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadStub(channel, callOptions); + } + }; + return BigQueryReadStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static BigQueryReadBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingV2Stub(channel, callOptions); + } + }; + return BigQueryReadBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryReadBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingStub(channel, callOptions); + } + }; + return BigQueryReadBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryReadFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadFutureStub(channel, callOptions); + } + }; + return BigQueryReadFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
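+     *
+     * <p>Editorial sketch (hypothetical): a minimal server-side override in the style of the
+     * in-process test fakes earlier in this change, completing every call with an empty session:
+     * <pre>
+     * class FakeBigQueryRead extends BigQueryReadGrpc.BigQueryReadImplBase {
+     *   @Override
+     *   public void createReadSession(
+     *       CreateReadSessionRequest request, io.grpc.stub.StreamObserver<ReadSession> obs) {
+     *     obs.onNext(ReadSession.getDefaultInstance()); // respond with an empty session
+     *     obs.onCompleted();
+     *   }
+     * }
+     * </pre>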
+ */ + default void createReadSession( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateReadSessionMethod(), responseObserver); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 128 MB
+     * per response; read requests which attempt to read individual rows larger
+     * than 128 MB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
+ */ + default void readRows( + com.google.cloud.bigquery.storage.v1.ReadRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + default void splitReadStream( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSplitReadStreamMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public abstract static class BigQueryReadImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return BigQueryReadGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadStub + extends io.grpc.stub.AbstractAsyncStub { + private BigQueryReadStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public void createReadSession( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 128 MB
+     * per response; read requests which attempt to read individual rows larger
+     * than 128 MB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
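+     *
+     * <p>Editorial sketch (hypothetical; {@code streamName} and the handler methods are
+     * placeholders): consuming the stream asynchronously via a {@code StreamObserver}:
+     * <pre>
+     * asyncStub.readRows(
+     *     ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(0).build(),
+     *     new io.grpc.stub.StreamObserver<ReadRowsResponse>() {
+     *       public void onNext(ReadRowsResponse response) { process(response); }
+     *       public void onError(Throwable t) { handle(t); }
+     *       public void onCompleted() { done(); }
+     *     });
+     * </pre>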
+ */ + public void readRows( + com.google.cloud.bigquery.storage.v1.ReadRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getReadRowsMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public void splitReadStream( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryReadBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 128 MB
+     * per response; read requests which attempt to read individual rows larger
+     * than 128 MB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall + readRows(com.google.cloud.bigquery.storage.v1.ReadRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse splitReadStream( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryReadBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
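+     *
+     * <p>Editorial sketch (hypothetical; {@code setParent} and {@code setMaxStreamCount} are
+     * assumptions about the request builder, which is not defined in this file):
+     * <pre>
+     * ReadSession session = blockingStub.createReadSession(
+     *     CreateReadSessionRequest.newBuilder()
+     *         .setParent("projects/my-project")                       // assumed field
+     *         .setReadSession(
+     *             ReadSession.newBuilder().setTable("projects/p/datasets/d/tables/t"))
+     *         .setMaxStreamCount(1)                                   // assumed field
+     *         .build());
+     * </pre>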
+ */ + public com.google.cloud.bigquery.storage.v1.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 128 MB
+     * per response; read requests which attempt to read individual rows larger
+     * than 128 MB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
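+     *
+     * <p>Editorial sketch (hypothetical; {@code streamName} is a placeholder): draining the
+     * iterator while tracking the row offset, so a failed stream could be resumed with
+     * {@code setOffset}, mirroring the retry tests earlier in this change:
+     * <pre>
+     * long offset = 0;
+     * java.util.Iterator<ReadRowsResponse> rows = blockingStub.readRows(
+     *     ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(offset).build());
+     * while (rows.hasNext()) {
+     *   offset += rows.next().getRowCount(); // rows consumed so far
+     * }
+     * </pre>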
+ */ + public java.util.Iterator readRows( + com.google.cloud.bigquery.storage.v1.ReadRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
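+     *
+     * <p>Editorial sketch (hypothetical; {@code setFraction} is an assumption about the request
+     * builder, which is not defined in this file):
+     * <pre>
+     * SplitReadStreamResponse split = blockingStub.splitReadStream(
+     *     SplitReadStreamRequest.newBuilder()
+     *         .setName(streamName)
+     *         .setFraction(0.5) // assumed: split roughly in half
+     *         .build());
+     * </pre>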
+ */ + public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse splitReadStream( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BigQueryReadFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. Once the caller has
+     * reached the end of each stream in the session, all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is a collection of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
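+     * A sketch of the future-style call; the executor choice and `request`
+     * are assumptions for the example:
+     * 
+     *   com.google.common.util.concurrent.Futures.addCallback(
+     *       BigQueryReadGrpc.newFutureStub(channel).createReadSession(request),
+     *       new com.google.common.util.concurrent.FutureCallback&lt;ReadSession&gt;() {
+     *         @Override public void onSuccess(ReadSession session) {
+     *           // hand the session's streams to reader threads
+     *         }
+     *         @Override public void onFailure(Throwable t) {
+     *           // surface the RPC failure
+     *         }
+     *       },
+     *       com.google.common.util.concurrent.MoreExecutors.directExecutor());
+     * 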
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.ReadSession> + createReadSession(com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse> + splitReadStream(com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_READ_SESSION = 0; + private static final int METHODID_READ_ROWS = 1; + private static final int METHODID_SPLIT_READ_STREAM = 2; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_READ_SESSION: + serviceImpl.createReadSession( + (com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_READ_ROWS: + serviceImpl.readRows( + (com.google.cloud.bigquery.storage.v1.ReadRowsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_SPLIT_READ_STREAM: + serviceImpl.splitReadStream( + (com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateReadSessionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1.ReadSession>( + service, METHODID_CREATE_READ_SESSION))) + .addMethod( + getReadRowsMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1.ReadRowsResponse>( + service, METHODID_READ_ROWS))) + .addMethod( + getSplitReadStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse>( + service, METHODID_SPLIT_READ_STREAM))) + .build(); + } + + private abstract static class BigQueryReadBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryReadBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto.getDescriptor(); + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryRead"); + } + } + + private static final class BigQueryReadFileDescriptorSupplier + extends BigQueryReadBaseDescriptorSupplier { + BigQueryReadFileDescriptorSupplier() {} + } + + private static final class BigQueryReadMethodDescriptorSupplier + extends BigQueryReadBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + BigQueryReadMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryReadGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryReadFileDescriptorSupplier()) + .addMethod(getCreateReadSessionMethod()) + .addMethod(getReadRowsMethod()) + .addMethod(getSplitReadStreamMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java new file mode 100644 index 000000000000..30ec1e6364f8 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java @@ -0,0 +1,1246 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * BigQuery Write API.
+ * The Write API can be used to write data to BigQuery.
+ * For supplementary information about the Write API, see:
+ * https://cloud.google.com/bigquery/docs/write-api
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class BigQueryWriteGrpc { + + private BigQueryWriteGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1.BigQueryWrite"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getCreateWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateWriteStream", + requestType = com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getCreateWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getCreateWriteStreamMethod; + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + BigQueryWriteGrpc.getCreateWriteStreamMethod = + getCreateWriteStreamMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("CreateWriteStream")) + .build(); + } + } + } + return getCreateWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + getAppendRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "AppendRows", + requestType = com.google.cloud.bigquery.storage.v1.AppendRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.AppendRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + getAppendRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + getAppendRowsMethod; + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + BigQueryWriteGrpc.getAppendRowsMethod = + getAppendRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "AppendRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("AppendRows")) + .build(); + } + } + } + return getAppendRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getGetWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetWriteStream", + requestType = com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getGetWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream> + getGetWriteStreamMethod; + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + BigQueryWriteGrpc.getGetWriteStreamMethod = + getGetWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("GetWriteStream")) + .build(); + } + } + } + return getGetWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FinalizeWriteStream", + requestType = com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) + == null) { + BigQueryWriteGrpc.getFinalizeWriteStreamMethod = + getFinalizeWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "FinalizeWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("FinalizeWriteStream")) + .build(); + } + } + } + return getFinalizeWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCommitWriteStreams", + requestType = com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod = + getBatchCommitWriteStreamsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCommitWriteStreams")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("BatchCommitWriteStreams")) + .build(); + } + } + } + return getBatchCommitWriteStreamsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + getFlushRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FlushRows", + requestType = com.google.cloud.bigquery.storage.v1.FlushRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1.FlushRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + getFlushRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + getFlushRowsMethod; + if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { + BigQueryWriteGrpc.getFlushRowsMethod = + getFlushRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FlushRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("FlushRows")) + .build(); + } + } + } + return getFlushRowsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryWriteStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + }; + return BigQueryWriteStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static BigQueryWriteBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingV2Stub(channel, callOptions); + } + }; + return BigQueryWriteBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryWriteBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + }; + return BigQueryWriteBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryWriteFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + }; + return BigQueryWriteFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
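+     * A client-side sketch of creating an explicit PENDING stream (the
+     * `channel` and table name are assumptions for the example):
+     * 
+     *   BigQueryWriteGrpc.BigQueryWriteBlockingStub stub =
+     *       BigQueryWriteGrpc.newBlockingStub(channel);
+     *   WriteStream stream = stub.createWriteStream(
+     *       CreateWriteStreamRequest.newBuilder()
+     *           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+     *           .setWriteStream(WriteStream.newBuilder()
+     *               .setType(WriteStream.Type.PENDING))
+     *           .build());
+     * 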
+ */ + default void createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the
+     * stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+     * already been written to. The user can retry with an adjusted offset
+     * within the same RPC connection. If `offset` is not specified, the append
+     * happens at the end of the stream.
+     * The response contains an optional offset at which the append
+     * happened. No offset information will be returned for appends to a
+     * default stream.
+     * Responses are received in the same order in which requests are sent.
+     * There will be one response for each successfully inserted request. Responses
+     * may optionally embed error information if the originating AppendRequest was
+     * not successfully processed.
+     * The specifics of when successfully appended data is made visible to the
+     * table are governed by the type of stream:
+     * * For COMMITTED streams (which includes the default stream), data is
+     * visible immediately upon successful append.
+     * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+     * rpc which advances a cursor to a newer offset in the stream.
+     * * For PENDING streams, data is not made visible until the stream itself is
+     * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+     * committed via the `BatchCommitWriteStreams` rpc.
+     * 
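+     * A client-side sketch of the bidi call; the `channel`, `stream`, and
+     * `protoData` payload are assumptions for the example:
+     * 
+     *   BigQueryWriteGrpc.BigQueryWriteStub asyncStub = BigQueryWriteGrpc.newStub(channel);
+     *   io.grpc.stub.StreamObserver&lt;AppendRowsRequest&gt; requests = asyncStub.appendRows(
+     *       new io.grpc.stub.StreamObserver&lt;AppendRowsResponse&gt;() {
+     *         @Override public void onNext(AppendRowsResponse response) {
+     *           // check response.getAppendResult().getOffset() or response.getError()
+     *         }
+     *         @Override public void onError(Throwable t) {
+     *           // the whole connection failed; reopen and retry
+     *         }
+     *         @Override public void onCompleted() {}
+     *       });
+     *   requests.onNext(AppendRowsRequest.newBuilder()
+     *       .setWriteStream(stream.getName())
+     *       .setProtoRows(protoData)
+     *       .build());
+     *   requests.onCompleted();
+     * 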
+ */ + default io.grpc.stub.StreamObserver + appendRows( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getAppendRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + default void getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + default void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getFinalizeWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
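+     * A sketch of the PENDING-stream commit flow; `stub` and `stream` are
+     * assumed from earlier calls:
+     * 
+     *   stub.finalizeWriteStream(FinalizeWriteStreamRequest.newBuilder()
+     *       .setName(stream.getName())
+     *       .build());
+     *   BatchCommitWriteStreamsResponse commit = stub.batchCommitWriteStreams(
+     *       BatchCommitWriteStreamsRequest.newBuilder()
+     *           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+     *           .addWriteStreams(stream.getName())
+     *           .build());
+     *   // commit.hasCommitTime() signals success; otherwise inspect
+     *   // commit.getStreamErrorsList().
+     * 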
+ */ + default void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCommitWriteStreamsMethod(), responseObserver); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the flush cursor from any previously flushed
+     * offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
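+     * A sketch of flushing a BUFFERED stream up to an offset; the
+     * `lastAckedOffset` value is an assumption for the example:
+     * 
+     *   FlushRowsResponse flushed = stub.flushRows(FlushRowsRequest.newBuilder()
+     *       .setWriteStream(stream.getName())
+     *       .setOffset(com.google.protobuf.Int64Value.of(lastAckedOffset))
+     *       .build());
+     *   // Rows up to flushed.getOffset() are now visible to readers.
+     * 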
+ */ + default void flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getFlushRowsMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public abstract static class BigQueryWriteImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return BigQueryWriteGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteStub + extends io.grpc.stub.AbstractAsyncStub { + private BigQueryWriteStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + public void createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the
+     * stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+     * already been written to. The user can retry with an adjusted offset
+     * within the same RPC connection. If `offset` is not specified, the append
+     * happens at the end of the stream.
+     * The response contains an optional offset at which the append
+     * happened. No offset information will be returned for appends to a
+     * default stream.
+     * Responses are received in the same order in which requests are sent.
+     * There will be one response for each successfully inserted request. Responses
+     * may optionally embed error information if the originating AppendRequest was
+     * not successfully processed.
+     * The specifics of when successfully appended data is made visible to the
+     * table are governed by the type of stream:
+     * * For COMMITTED streams (which includes the default stream), data is
+     * visible immediately upon successful append.
+     * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+     * rpc which advances a cursor to a newer offset in the stream.
+     * * For PENDING streams, data is not made visible until the stream itself is
+     * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+     * committed via the `BatchCommitWriteStreams` rpc.
+     * 
+ */ + public io.grpc.stub.StreamObserver + appendRows( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getAppendRowsMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public void getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the flush cursor from any previously flushed
+     * offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public void flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryWriteBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.WriteStream createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the
+     * stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+     * already been written to. The user can retry with an adjusted offset
+     * within the same RPC connection. If `offset` is not specified, the append
+     * happens at the end of the stream.
+     * The response contains an optional offset at which the append
+     * happened. No offset information will be returned for appends to a
+     * default stream.
+     * Responses are received in the same order in which requests are sent.
+     * There will be one response for each successfully inserted request. Responses
+     * may optionally embed error information if the originating AppendRequest was
+     * not successfully processed.
+     * The specifics of when successfully appended data is made visible to the
+     * table are governed by the type of stream:
+     * * For COMMITTED streams (which includes the default stream), data is
+     * visible immediately upon successful append.
+     * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+     * rpc which advances a cursor to a newer offset in the stream.
+     * * For PENDING streams, data is not made visible until the stream itself is
+     * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+     * committed via the `BatchCommitWriteStreams` rpc.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse> + appendRows() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getAppendRowsMethod(), getCallOptions()); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the flush cursor from any previously flushed
+     * offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getFlushRowsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryWriteBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.WriteStream createWriteStream( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the flush cursor from any previously flushed
+     * offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse flushRows( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFlushRowsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * For supplementary information about the Write API, see:
+   * https://cloud.google.com/bigquery/docs/write-api
+   * 
+ */ + public static final class BigQueryWriteFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BigQueryWriteFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.WriteStream> + createWriteStream(com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets information about a write stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.WriteStream> + getWriteStream(com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse> + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse> + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * flush operation advances the flush cursor from any previously flushed
+     * offset to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1.FlushRowsResponse> + flushRows(com.google.cloud.bigquery.storage.v1.FlushRowsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_WRITE_STREAM = 0; + private static final int METHODID_GET_WRITE_STREAM = 1; + private static final int METHODID_FINALIZE_WRITE_STREAM = 2; + private static final int METHODID_BATCH_COMMIT_WRITE_STREAMS = 3; + private static final int METHODID_FLUSH_ROWS = 4; + private static final int METHODID_APPEND_ROWS = 5; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_WRITE_STREAM: + serviceImpl.createWriteStream( + (com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_WRITE_STREAM: + serviceImpl.getWriteStream( + (com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_FINALIZE_WRITE_STREAM: + serviceImpl.finalizeWriteStream( + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse>) + responseObserver); + break; + case METHODID_BATCH_COMMIT_WRITE_STREAMS: + serviceImpl.batchCommitWriteStreams( + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse>) + responseObserver); + break; + case METHODID_FLUSH_ROWS: + serviceImpl.flushRows( + (com.google.cloud.bigquery.storage.v1.FlushRowsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_APPEND_ROWS: + return (io.grpc.stub.StreamObserver) + serviceImpl.appendRows( + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream>( + service, METHODID_CREATE_WRITE_STREAM))) + .addMethod( + getAppendRowsMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + 
com.google.cloud.bigquery.storage.v1.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse>( + service, METHODID_APPEND_ROWS))) + .addMethod( + getGetWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.WriteStream>( + service, METHODID_GET_WRITE_STREAM))) + .addMethod( + getFinalizeWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse>( + service, METHODID_FINALIZE_WRITE_STREAM))) + .addMethod( + getBatchCommitWriteStreamsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse>( + service, METHODID_BATCH_COMMIT_WRITE_STREAMS))) + .addMethod( + getFlushRowsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse>( + service, METHODID_FLUSH_ROWS))) + .build(); + } + + private abstract static class BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryWriteBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryWrite"); + } + } + + private static final class BigQueryWriteFileDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier { + BigQueryWriteFileDescriptorSupplier() {} + } + + private static final class BigQueryWriteMethodDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + BigQueryWriteMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryWriteGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryWriteFileDescriptorSupplier()) + .addMethod(getCreateWriteStreamMethod()) + .addMethod(getAppendRowsMethod()) + .addMethod(getGetWriteStreamMethod()) + .addMethod(getFinalizeWriteStreamMethod()) + .addMethod(getBatchCommitWriteStreamsMethod()) + .addMethod(getFlushRowsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml new file mode 100644 index 000000000000..7a6980e6a184 --- /dev/null +++ 
b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml @@ -0,0 +1,64 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha + 3.19.1 + grpc-google-cloud-bigquerystorage-v1alpha + GRPC library for google-cloud-bigquerystorage + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha + + + com.google.guava + guava + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java new file mode 100644 index 000000000000..f932748238dd --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java @@ -0,0 +1,1052 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * BigQuery Metastore Partition Service API.
+ * This service is used for managing metastore partitions in BigQuery
+ * metastore. The service supports only batch operations for writes.
+ * 
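+ * A sketch of a batch create call; the table name, partition values, and
+ * v1alpha field names are assumptions for the example:
+ * 
+ *   MetastorePartitionServiceGrpc.MetastorePartitionServiceBlockingStub stub =
+ *       MetastorePartitionServiceGrpc.newBlockingStub(channel);
+ *   stub.batchCreateMetastorePartitions(
+ *       BatchCreateMetastorePartitionsRequest.newBuilder()
+ *           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+ *           .addRequests(CreateMetastorePartitionRequest.newBuilder()
+ *               .setMetastorePartition(MetastorePartition.newBuilder()
+ *                   .addValues("2024-01-01")))
+ *           .build());
+ * 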
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class MetastorePartitionServiceGrpc { + + private MetastorePartitionServiceGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1alpha.MetastorePartitionService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse> + getBatchCreateMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCreateMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse> + getBatchCreateMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse> + getBatchCreateMetastorePartitionsMethod; + if ((getBatchCreateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchCreateMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getBatchCreateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchCreateMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getBatchCreateMetastorePartitionsMethod = + getBatchCreateMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCreateMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .BatchCreateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .BatchCreateMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "BatchCreateMetastorePartitions")) + .build(); + } + } + } + return getBatchCreateMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty> + getBatchDeleteMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchDeleteMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty> + getBatchDeleteMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty> + getBatchDeleteMetastorePartitionsMethod; + if ((getBatchDeleteMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchDeleteMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getBatchDeleteMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchDeleteMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getBatchDeleteMetastorePartitionsMethod = + getBatchDeleteMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchDeleteMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .BatchDeleteMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "BatchDeleteMetastorePartitions")) + .build(); + } + } + } + return getBatchDeleteMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse> + getBatchUpdateMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchUpdateMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse> + getBatchUpdateMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse> + getBatchUpdateMetastorePartitionsMethod; + if ((getBatchUpdateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchUpdateMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getBatchUpdateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchUpdateMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getBatchUpdateMetastorePartitionsMethod = + getBatchUpdateMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchUpdateMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .BatchUpdateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .BatchUpdateMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "BatchUpdateMetastorePartitions")) + .build(); + } + } + } + return getBatchUpdateMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse> + getListMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListMetastorePartitions", + requestType = com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse> + getListMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse> + getListMetastorePartitionsMethod; + if ((getListMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getListMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getListMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getListMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getListMetastorePartitionsMethod = + getListMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .ListMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .ListMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "ListMetastorePartitions")) + .build(); + } + } + } + return getListMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse> + getStreamMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StreamMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse> + getStreamMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse> + getStreamMetastorePartitionsMethod; + if ((getStreamMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getStreamMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getStreamMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getStreamMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getStreamMetastorePartitionsMethod = + getStreamMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "StreamMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .StreamMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha + .StreamMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "StreamMetastorePartitions")) + .build(); + } + } + } + return getStreamMetastorePartitionsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static MetastorePartitionServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceStub(channel, callOptions); + } + }; + return MetastorePartitionServiceStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static MetastorePartitionServiceBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingV2Stub(channel, callOptions); + } + }; + return MetastorePartitionServiceBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static MetastorePartitionServiceBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingStub(channel, callOptions); + } + }; + return MetastorePartitionServiceBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static MetastorePartitionServiceFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceFutureStub(channel, callOptions); + } + }; + return MetastorePartitionServiceFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + default void batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCreateMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + default void batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchDeleteMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + default void batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchUpdateMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + default void listMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * This is a bidirectional streaming RPC that allows the client to send
+     * a stream of partitions and commit all of them atomically at the end.
+     * If the commit succeeds, the server returns a response and closes the
+     * stream. If the commit fails (due to duplicate partitions or another
+     * reason), the server closes the stream with an error. This method is
+     * available only via the gRPC API (not REST).
+     * 
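+     *
+     * <p>A hedged client-side sketch of driving this stream through the async
+     * stub ({@code channel} and {@code firstBatch} are hypothetical: an existing
+     * gRPC channel and a pre-built request message):
+     * <pre>{@code
+     * MetastorePartitionServiceGrpc.MetastorePartitionServiceStub stub =
+     *     MetastorePartitionServiceGrpc.newStub(channel);
+     * io.grpc.stub.StreamObserver<StreamMetastorePartitionsRequest> requests =
+     *     stub.streamMetastorePartitions(
+     *         new io.grpc.stub.StreamObserver<StreamMetastorePartitionsResponse>() {
+     *           @Override public void onNext(StreamMetastorePartitionsResponse r) {
+     *             // Progress reported by the server while the stream is open.
+     *           }
+     *           @Override public void onError(Throwable t) {
+     *             // The commit failed; the stream is closed with an error.
+     *           }
+     *           @Override public void onCompleted() {
+     *             // The commit succeeded; the stream is closed cleanly.
+     *           }
+     *         });
+     * requests.onNext(firstBatch); // send one or more request messages
+     * requests.onCompleted();      // half-close to request the atomic commit
+     * }</pre>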
+ */ + default io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest> + streamMetastorePartitions( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse> + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getStreamMetastorePartitionsMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public abstract static class MetastorePartitionServiceImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return MetastorePartitionServiceGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceStub + extends io.grpc.stub.AbstractAsyncStub { + private MetastorePartitionServiceStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceStub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public void batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCreateMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public void batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchDeleteMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public void batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchUpdateMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public void listMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * This is a bidirectional streaming RPC that allows the client to send
+     * a stream of partitions and commit all of them atomically at the end.
+     * If the commit succeeds, the server returns a response and closes the
+     * stream. If the commit fails (due to duplicate partitions or another
+     * reason), the server closes the stream with an error. This method is
+     * available only via the gRPC API (not REST).
+     * 
+ */ + public io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest> + streamMetastorePartitions( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse> + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getStreamMetastorePartitionsMethod(), getCallOptions()), + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private MetastorePartitionServiceBlockingV2Stub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchCreateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public com.google.protobuf.Empty batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchDeleteMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchUpdateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + listMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * This is a bidirectional streaming RPC that allows the client to send
+     * a stream of partitions and commit all of them atomically at the end.
+     * If the commit succeeds, the server returns a response and closes the
+     * stream. If the commit fails (due to duplicate partitions or another
+     * reason), the server closes the stream with an error. This method is
+     * available only via the gRPC API (not REST).
+     * 
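+     *
+     * <p>A hedged sketch of the experimental blocking form; the exact
+     * {@code BlockingClientCall} surface (write/halfClose/hasNext/read) is an
+     * assumption and should be checked against the grpc-java version in use
+     * ({@code channel} and {@code firstBatch} are hypothetical):
+     * <pre>{@code
+     * MetastorePartitionServiceGrpc.MetastorePartitionServiceBlockingV2Stub stub =
+     *     MetastorePartitionServiceGrpc.newBlockingV2Stub(channel);
+     * io.grpc.stub.BlockingClientCall<
+     *         StreamMetastorePartitionsRequest, StreamMetastorePartitionsResponse>
+     *     call = stub.streamMetastorePartitions();
+     * call.write(firstBatch); // blocks until the message can be sent
+     * call.halfClose();       // no more requests; ask the server to commit
+     * while (call.hasNext()) {
+     *   StreamMetastorePartitionsResponse response = call.read();
+     * }
+     * }</pre>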
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse> + streamMetastorePartitions() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getStreamMetastorePartitionsMethod(), getCallOptions()); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service + * MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private MetastorePartitionServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCreateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public com.google.protobuf.Empty batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchDeleteMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchUpdateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + listMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListMetastorePartitionsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service + * MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceFutureStub + extends io.grpc.stub.AbstractFutureStub { + private MetastorePartitionServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCreateMetastorePartitionsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchDeleteMetastorePartitionsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchUpdateMetastorePartitionsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse> + listMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListMetastorePartitionsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_BATCH_CREATE_METASTORE_PARTITIONS = 0; + private static final int METHODID_BATCH_DELETE_METASTORE_PARTITIONS = 1; + private static final int METHODID_BATCH_UPDATE_METASTORE_PARTITIONS = 2; + private static final int METHODID_LIST_METASTORE_PARTITIONS = 3; + private static final int METHODID_STREAM_METASTORE_PARTITIONS = 4; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_BATCH_CREATE_METASTORE_PARTITIONS: + serviceImpl.batchCreateMetastorePartitions( + (com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha + .BatchCreateMetastorePartitionsResponse>) + responseObserver); + break; + case METHODID_BATCH_DELETE_METASTORE_PARTITIONS: + serviceImpl.batchDeleteMetastorePartitions( + (com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_BATCH_UPDATE_METASTORE_PARTITIONS: + serviceImpl.batchUpdateMetastorePartitions( + (com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha + .BatchUpdateMetastorePartitionsResponse>) + responseObserver); + break; + case METHODID_LIST_METASTORE_PARTITIONS: + serviceImpl.listMetastorePartitions( + (com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_STREAM_METASTORE_PARTITIONS: + return (io.grpc.stub.StreamObserver) + serviceImpl.streamMetastorePartitions( + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha + .StreamMetastorePartitionsResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getBatchCreateMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest, + 
com.google.cloud.bigquery.storage.v1alpha + .BatchCreateMetastorePartitionsResponse>( + service, METHODID_BATCH_CREATE_METASTORE_PARTITIONS))) + .addMethod( + getBatchDeleteMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty>( + service, METHODID_BATCH_DELETE_METASTORE_PARTITIONS))) + .addMethod( + getBatchUpdateMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha + .BatchUpdateMetastorePartitionsResponse>( + service, METHODID_BATCH_UPDATE_METASTORE_PARTITIONS))) + .addMethod( + getListMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse>( + service, METHODID_LIST_METASTORE_PARTITIONS))) + .addMethod( + getStreamMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse>( + service, METHODID_STREAM_METASTORE_PARTITIONS))) + .build(); + } + + private abstract static class MetastorePartitionServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + MetastorePartitionServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("MetastorePartitionService"); + } + } + + private static final class MetastorePartitionServiceFileDescriptorSupplier + extends MetastorePartitionServiceBaseDescriptorSupplier { + MetastorePartitionServiceFileDescriptorSupplier() {} + } + + private static final class MetastorePartitionServiceMethodDescriptorSupplier + extends MetastorePartitionServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + MetastorePartitionServiceMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new MetastorePartitionServiceFileDescriptorSupplier()) + .addMethod(getBatchCreateMetastorePartitionsMethod()) + .addMethod(getBatchDeleteMetastorePartitionsMethod()) + .addMethod(getBatchUpdateMetastorePartitionsMethod()) + 
.addMethod(getListMetastorePartitionsMethod()) + .addMethod(getStreamMetastorePartitionsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta/pom.xml b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta/pom.xml new file mode 100644 index 000000000000..4af3d9c928ca --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta/pom.xml @@ -0,0 +1,64 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta + 3.19.1 + grpc-google-cloud-bigquerystorage-v1beta + GRPC library for google-cloud-bigquerystorage + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta + + + com.google.guava + guava + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java new file mode 100644 index 000000000000..2286f95f0889 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java @@ -0,0 +1,1050 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * BigQuery Metastore Partition Service API.
+ *  This service is used for managing metastore partitions in the BigQuery
+ *  metastore. The service supports only batch operations for writes.
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class MetastorePartitionServiceGrpc { + + private MetastorePartitionServiceGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1beta.MetastorePartitionService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse> + getBatchCreateMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCreateMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse> + getBatchCreateMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse> + getBatchCreateMetastorePartitionsMethod; + if ((getBatchCreateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchCreateMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getBatchCreateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchCreateMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getBatchCreateMetastorePartitionsMethod = + getBatchCreateMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCreateMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .BatchCreateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .BatchCreateMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "BatchCreateMetastorePartitions")) + .build(); + } + } + } + return getBatchCreateMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty> + getBatchDeleteMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchDeleteMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty> + getBatchDeleteMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty> + getBatchDeleteMetastorePartitionsMethod; + if ((getBatchDeleteMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchDeleteMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getBatchDeleteMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchDeleteMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getBatchDeleteMetastorePartitionsMethod = + getBatchDeleteMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchDeleteMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .BatchDeleteMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "BatchDeleteMetastorePartitions")) + .build(); + } + } + } + return getBatchDeleteMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse> + getBatchUpdateMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchUpdateMetastorePartitions", + requestType = + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse> + getBatchUpdateMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse> + getBatchUpdateMetastorePartitionsMethod; + if ((getBatchUpdateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchUpdateMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getBatchUpdateMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getBatchUpdateMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getBatchUpdateMetastorePartitionsMethod = + getBatchUpdateMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchUpdateMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .BatchUpdateMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .BatchUpdateMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "BatchUpdateMetastorePartitions")) + .build(); + } + } + } + return getBatchUpdateMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse> + getListMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListMetastorePartitions", + requestType = com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse> + getListMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse> + getListMetastorePartitionsMethod; + if ((getListMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getListMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getListMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getListMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getListMetastorePartitionsMethod = + getListMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .ListMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .ListMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "ListMetastorePartitions")) + .build(); + } + } + } + return getListMetastorePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse> + getStreamMetastorePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StreamMetastorePartitions", + requestType = com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse> + getStreamMetastorePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse> + getStreamMetastorePartitionsMethod; + if ((getStreamMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getStreamMetastorePartitionsMethod) + == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + if ((getStreamMetastorePartitionsMethod = + MetastorePartitionServiceGrpc.getStreamMetastorePartitionsMethod) + == null) { + MetastorePartitionServiceGrpc.getStreamMetastorePartitionsMethod = + getStreamMetastorePartitionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "StreamMetastorePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .StreamMetastorePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta + .StreamMetastorePartitionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new MetastorePartitionServiceMethodDescriptorSupplier( + "StreamMetastorePartitions")) + .build(); + } + } + } + return getStreamMetastorePartitionsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static MetastorePartitionServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceStub(channel, callOptions); + } + }; + return MetastorePartitionServiceStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static MetastorePartitionServiceBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingV2Stub(channel, callOptions); + } + }; + return MetastorePartitionServiceBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static MetastorePartitionServiceBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingStub(channel, callOptions); + } + }; + return MetastorePartitionServiceBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static MetastorePartitionServiceFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public MetastorePartitionServiceFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceFutureStub(channel, callOptions); + } + }; + return MetastorePartitionServiceFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + default void batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCreateMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + default void batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchDeleteMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + default void batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchUpdateMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + default void listMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListMetastorePartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * This is a bidirectional streaming RPC that allows the client to send
+     * a stream of partitions and commit all of them atomically at the end.
+     * If the commit succeeds, the server returns a response and closes the
+     * stream. If the commit fails (due to duplicate partitions or another
+     * reason), the server closes the stream with an error. This method is
+     * available only via the gRPC API (not REST).
+     * 
+ */ + default io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest> + streamMetastorePartitions( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse> + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getStreamMetastorePartitionsMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public abstract static class MetastorePartitionServiceImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return MetastorePartitionServiceGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceStub + extends io.grpc.stub.AbstractAsyncStub { + private MetastorePartitionServiceStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceStub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public void batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCreateMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public void batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchDeleteMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public void batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchUpdateMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public void listMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListMetastorePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * This is a bidirectional streaming RPC method that allows the client to send
+     * a stream of partitions and commit all of them atomically at the end.
+     * If the commit is successful, the server will return a
+     * response and close the stream. If the commit fails (due to duplicate
+     * partitions or another reason), the server will close the stream with an
+     * error. This method is only available via the gRPC API (not REST).
+     * 
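+     * <p>A minimal client-side sketch ({@code stub} and the {@code partitionBatch}
+     * request object are assumed to already exist):
+     * <pre>{@code
+     * io.grpc.stub.StreamObserver<StreamMetastorePartitionsRequest> requests =
+     *     stub.streamMetastorePartitions(
+     *         new io.grpc.stub.StreamObserver<StreamMetastorePartitionsResponse>() {
+     *           public void onNext(StreamMetastorePartitionsResponse response) {
+     *             // Final response returned once the commit succeeds.
+     *           }
+     *           public void onError(Throwable t) {
+     *             // The commit failed; none of the streamed partitions were applied.
+     *           }
+     *           public void onCompleted() {
+     *             // Server closed the stream after a successful commit.
+     *           }
+     *         });
+     * requests.onNext(partitionBatch); // send one or more batches of partitions
+     * requests.onCompleted();          // half-close to request the atomic commit
+     * }</pre>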
+ */ + public io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest> + streamMetastorePartitions( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse> + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getStreamMetastorePartitionsMethod(), getCallOptions()), + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private MetastorePartitionServiceBlockingV2Stub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchCreateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public com.google.protobuf.Empty batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchDeleteMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchUpdateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + listMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * This is a bidirectional streaming RPC method that allows the client to send
+     * a stream of partitions and commit all of them atomically at the end.
+     * If the commit is successful, the server will return a
+     * response and close the stream. If the commit fails (due to duplicate
+     * partitions or another reason), the server will close the stream with an
+     * error. This method is only available via the gRPC API (not REST).
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse> + streamMetastorePartitions() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getStreamMetastorePartitionsMethod(), getCallOptions()); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service + * MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private MetastorePartitionServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCreateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public com.google.protobuf.Empty batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchDeleteMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchUpdateMetastorePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + listMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListMetastorePartitionsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service + * MetastorePartitionService. + * + *
+   * BigQuery Metastore Partition Service API.
+   *  This service is used for managing metastore partitions in the BigQuery
+   *  metastore. The service supports only batch operations for writes.
+   * 
+ */ + public static final class MetastorePartitionServiceFutureStub + extends io.grpc.stub.AbstractFutureStub { + private MetastorePartitionServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected MetastorePartitionServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new MetastorePartitionServiceFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Adds metastore partitions to a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse> + batchCreateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCreateMetastorePartitionsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Deletes metastore partitions from a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + batchDeleteMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchDeleteMetastorePartitionsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Updates metastore partitions in a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse> + batchUpdateMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchUpdateMetastorePartitionsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Gets metastore partitions from a table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse> + listMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListMetastorePartitionsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_BATCH_CREATE_METASTORE_PARTITIONS = 0; + private static final int METHODID_BATCH_DELETE_METASTORE_PARTITIONS = 1; + private static final int METHODID_BATCH_UPDATE_METASTORE_PARTITIONS = 2; + private static final int METHODID_LIST_METASTORE_PARTITIONS = 3; + private static final int METHODID_STREAM_METASTORE_PARTITIONS = 4; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_BATCH_CREATE_METASTORE_PARTITIONS: + serviceImpl.batchCreateMetastorePartitions( + (com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta + .BatchCreateMetastorePartitionsResponse>) + responseObserver); + break; + case METHODID_BATCH_DELETE_METASTORE_PARTITIONS: + serviceImpl.batchDeleteMetastorePartitions( + (com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) + request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_BATCH_UPDATE_METASTORE_PARTITIONS: + serviceImpl.batchUpdateMetastorePartitions( + (com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta + .BatchUpdateMetastorePartitionsResponse>) + responseObserver); + break; + case METHODID_LIST_METASTORE_PARTITIONS: + serviceImpl.listMetastorePartitions( + (com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_STREAM_METASTORE_PARTITIONS: + return (io.grpc.stub.StreamObserver) + serviceImpl.streamMetastorePartitions( + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta + .StreamMetastorePartitionsResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getBatchCreateMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest, + 
com.google.cloud.bigquery.storage.v1beta + .BatchCreateMetastorePartitionsResponse>( + service, METHODID_BATCH_CREATE_METASTORE_PARTITIONS))) + .addMethod( + getBatchDeleteMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest, + com.google.protobuf.Empty>( + service, METHODID_BATCH_DELETE_METASTORE_PARTITIONS))) + .addMethod( + getBatchUpdateMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta + .BatchUpdateMetastorePartitionsResponse>( + service, METHODID_BATCH_UPDATE_METASTORE_PARTITIONS))) + .addMethod( + getListMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse>( + service, METHODID_LIST_METASTORE_PARTITIONS))) + .addMethod( + getStreamMetastorePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse>( + service, METHODID_STREAM_METASTORE_PARTITIONS))) + .build(); + } + + private abstract static class MetastorePartitionServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + MetastorePartitionServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("MetastorePartitionService"); + } + } + + private static final class MetastorePartitionServiceFileDescriptorSupplier + extends MetastorePartitionServiceBaseDescriptorSupplier { + MetastorePartitionServiceFileDescriptorSupplier() {} + } + + private static final class MetastorePartitionServiceMethodDescriptorSupplier + extends MetastorePartitionServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + MetastorePartitionServiceMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (MetastorePartitionServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new MetastorePartitionServiceFileDescriptorSupplier()) + .addMethod(getBatchCreateMetastorePartitionsMethod()) + .addMethod(getBatchDeleteMetastorePartitionsMethod()) + .addMethod(getBatchUpdateMetastorePartitionsMethod()) + .addMethod(getListMetastorePartitionsMethod()) 
+ .addMethod(getStreamMetastorePartitionsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml new file mode 100644 index 000000000000..6810f8588cd7 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -0,0 +1,65 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta1 + 0.191.1 + grpc-google-cloud-bigquerystorage-v1beta1 + GRPC library for grpc-google-cloud-bigquerystorage-v1beta1 + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta1 + + + com.google.guava + guava + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java new file mode 100644 index 000000000000..cf74d93f6857 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java @@ -0,0 +1,1209 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * BigQuery storage API.
+ * The BigQuery storage API can be used to read data stored in BigQuery.
+ * The v1beta1 API is not yet officially deprecated, and will go through a full
+ * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+ * before the service is turned down. However, new code should use the v1 API
+ * going forward.
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class BigQueryStorageGrpc { + + private BigQueryStorageGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1beta1.BigQueryStorage"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession> + getCreateReadSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateReadSession", + requestType = + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession> + getCreateReadSessionMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession> + getCreateReadSessionMethod; + if ((getCreateReadSessionMethod = BigQueryStorageGrpc.getCreateReadSessionMethod) == null) { + synchronized (BigQueryStorageGrpc.class) { + if ((getCreateReadSessionMethod = BigQueryStorageGrpc.getCreateReadSessionMethod) == null) { + BigQueryStorageGrpc.getCreateReadSessionMethod = + getCreateReadSessionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateReadSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage + .CreateReadSessionRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryStorageMethodDescriptorSupplier("CreateReadSession")) + .build(); + } + } + } + return getCreateReadSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse> + getReadRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ReadRows", + requestType = com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse> + getReadRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse> + getReadRowsMethod; + if ((getReadRowsMethod = BigQueryStorageGrpc.getReadRowsMethod) == null) { + synchronized (BigQueryStorageGrpc.class) { + if ((getReadRowsMethod = BigQueryStorageGrpc.getReadRowsMethod) == null) { + BigQueryStorageGrpc.getReadRowsMethod = + getReadRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ReadRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryStorageMethodDescriptorSupplier("ReadRows")) + .build(); + } + } + } + return getReadRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse> + getBatchCreateReadSessionStreamsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCreateReadSessionStreams", + requestType = + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + .class, + responseType = + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + .class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse> + getBatchCreateReadSessionStreamsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse> + getBatchCreateReadSessionStreamsMethod; + if ((getBatchCreateReadSessionStreamsMethod = + BigQueryStorageGrpc.getBatchCreateReadSessionStreamsMethod) + == null) { + synchronized (BigQueryStorageGrpc.class) { + if ((getBatchCreateReadSessionStreamsMethod = + BigQueryStorageGrpc.getBatchCreateReadSessionStreamsMethod) + == null) { + BigQueryStorageGrpc.getBatchCreateReadSessionStreamsMethod = + getBatchCreateReadSessionStreamsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCreateReadSessionStreams")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryStorageMethodDescriptorSupplier( + "BatchCreateReadSessionStreams")) + .build(); + } + } + } + return getBatchCreateReadSessionStreamsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest, + com.google.protobuf.Empty> + getFinalizeStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FinalizeStream", + requestType = com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest, + com.google.protobuf.Empty> + getFinalizeStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest, + com.google.protobuf.Empty> + getFinalizeStreamMethod; + if ((getFinalizeStreamMethod = BigQueryStorageGrpc.getFinalizeStreamMethod) == null) { + synchronized (BigQueryStorageGrpc.class) { + if ((getFinalizeStreamMethod = BigQueryStorageGrpc.getFinalizeStreamMethod) == null) { + BigQueryStorageGrpc.getFinalizeStreamMethod = + getFinalizeStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FinalizeStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage + .FinalizeStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryStorageMethodDescriptorSupplier("FinalizeStream")) + .build(); + } + } + } + return getFinalizeStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse> + getSplitReadStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SplitReadStream", + requestType = com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse> + getSplitReadStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse> + getSplitReadStreamMethod; + if ((getSplitReadStreamMethod = BigQueryStorageGrpc.getSplitReadStreamMethod) == null) { + synchronized (BigQueryStorageGrpc.class) { + if ((getSplitReadStreamMethod = BigQueryStorageGrpc.getSplitReadStreamMethod) == null) { + BigQueryStorageGrpc.getSplitReadStreamMethod = + getSplitReadStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SplitReadStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage + .SplitReadStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta1.Storage + .SplitReadStreamResponse.getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryStorageMethodDescriptorSupplier("SplitReadStream")) + .build(); + } + } + } + return getSplitReadStreamMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryStorageStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryStorageStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageStub(channel, callOptions); + } + }; + return BigQueryStorageStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static BigQueryStorageBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryStorageBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageBlockingV2Stub(channel, callOptions); + } + }; + return BigQueryStorageBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryStorageBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryStorageBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageBlockingStub(channel, callOptions); + } + }; + return BigQueryStorageBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryStorageFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryStorageFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageFutureStub(channel, callOptions); + } + }; + return BigQueryStorageFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery storage API.
+   * The BigQuery storage API can be used to read data stored in BigQuery.
+   * The v1beta1 API is not yet officially deprecated, and will go through a full
+   * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+   * before the service is turned down. However, new code should use the v1 API
+   * going forward.
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, all the data in the
+     * table has been read.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + default void createReadSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateReadSessionMethod(), responseObserver); + } + + /** + * + * + *
+     * Reads rows from the table in the format prescribed by the read session.
+     * Each response contains one or more table rows, up to a maximum of 10 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than this will fail.
+     * Each response also contains a set of stream statistics reflecting the
+     * estimated total number of rows in the read stream. This number is computed
+     * based on the total table size and the number of active streams in the read
+     * session, and may change as other streams continue to read data.
+     * 
+ */ + default void readRows( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Creates additional streams for a ReadSession. This API can be used to
+     * dynamically adjust the parallelism of a batch processing task upwards by
+     * adding additional workers.
+     * 
+ */ + default void batchCreateReadSessionStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCreateReadSessionStreamsMethod(), responseObserver); + } + + /** + * + * + *
+     * Causes a single stream in a ReadSession to gracefully stop. This
+     * API can be used to dynamically adjust the parallelism of a batch processing
+     * task downwards without losing data.
+     * This API does not delete the stream -- it remains visible in the
+     * ReadSession, and any data processed by the stream is not released to other
+     * streams. However, no additional data will be assigned to the stream once
+     * this call completes. Callers must continue reading data on the stream until
+     * the end of the stream is reached so that data which has already been
+     * assigned to the stream will be processed.
+     * This method will return an error if there are no other live streams
+     * in the Session, or if SplitReadStream() has been called on the given
+     * Stream.
+     * 
+ */ + default void finalizeStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getFinalizeStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Splits a given read stream into two Streams. These streams are referred to
+     * as the primary and the residual of the split. The original stream can still
+     * be read from in the same manner as before. Both of the returned streams can
+     * also be read from, and the total rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back to back in the
+     * original Stream. Concretely, it is guaranteed that for streams Original,
+     * Primary, and Residual, Original[0-j] = Primary[0-j] and
+     * Original[j-n] = Residual[0-m] once the streams have been read to
+     * completion.
+     * This method is guaranteed to be idempotent.
+     * 
+ */ + default void splitReadStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSplitReadStreamMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service BigQueryStorage. + * + *
+   * BigQuery storage API.
+   * The BigQuery storage API can be used to read data stored in BigQuery.
+   * The v1beta1 API is not yet officially deprecated, and will go through a full
+   * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+   * before the service is turned down. However, new code should use the v1 API
+   * going forward.
+   * 
+ */ + public abstract static class BigQueryStorageImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return BigQueryStorageGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service BigQueryStorage. + * + *
+   * BigQuery storage API.
+   * The BigQuery storage API can be used to read data stored in BigQuery.
+   * The v1beta1 API is not yet officially deprecated, and will go through a full
+   * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+   * before the service is turned down. However, new code should use the v1 API
+   * going forward.
+   * 
+ */ + public static final class BigQueryStorageStub + extends io.grpc.stub.AbstractAsyncStub { + private BigQueryStorageStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryStorageStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, all the data in the
+     * table has been read.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public void createReadSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Reads rows from the table in the format prescribed by the read session.
+     * Each response contains one or more table rows, up to a maximum of 10 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than this will fail.
+     * Each response also contains a set of stream statistics reflecting the
+     * estimated total number of rows in the read stream. This number is computed
+     * based on the total table size and the number of active streams in the read
+     * session, and may change as other streams continue to read data.
+     * 
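+     * <p>A minimal asynchronous sketch ({@code stub} and a fully populated
+     * {@code request} are assumed to already exist):
+     * <pre>{@code
+     * stub.readRows(
+     *     request,
+     *     new io.grpc.stub.StreamObserver<Storage.ReadRowsResponse>() {
+     *       public void onNext(Storage.ReadRowsResponse response) {
+     *         // One or more rows, up to roughly 10 MiB per response.
+     *       }
+     *       public void onError(Throwable t) {
+     *         // Handle a failed or interrupted stream.
+     *       }
+     *       public void onCompleted() {
+     *         // This stream has been read to the end.
+     *       }
+     *     });
+     * }</pre>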
+ */ + public void readRows( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getReadRowsMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Creates additional streams for a ReadSession. This API can be used to
+     * dynamically adjust the parallelism of a batch processing task upwards by
+     * adding additional workers.
+     * 
+ */ + public void batchCreateReadSessionStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCreateReadSessionStreamsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Causes a single stream in a ReadSession to gracefully stop. This
+     * API can be used to dynamically adjust the parallelism of a batch processing
+     * task downwards without losing data.
+     * This API does not delete the stream -- it remains visible in the
+     * ReadSession, and any data processed by the stream is not released to other
+     * streams. However, no additional data will be assigned to the stream once
+     * this call completes. Callers must continue reading data on the stream until
+     * the end of the stream is reached so that data which has already been
+     * assigned to the stream will be processed.
+     * This method will return an error if there are no other live streams
+     * in the Session, or if SplitReadStream() has been called on the given
+     * Stream.
+     * 
+ */ + public void finalizeStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFinalizeStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Splits a given read stream into two Streams. These streams are referred to
+     * as the primary and the residual of the split. The original stream can still
+     * be read from in the same manner as before. Both of the returned streams can
+     * also be read from, and the total rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back to back in the
+     * original Stream. Concretely, it is guaranteed that for streams Original,
+     * Primary, and Residual, Original[0-j] = Primary[0-j] and
+     * Original[j-n] = Residual[0-m] once the streams have been read to
+     * completion.
+     * This method is guaranteed to be idempotent.
+     * 
+ */ + public void splitReadStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service BigQueryStorage. + * + *
+   * BigQuery storage API.
+   * The BigQuery storage API can be used to read data stored in BigQuery.
+   * The v1beta1 API is not yet officially deprecated, and will go through a full
+   * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+   * before the service is turned down. However, new code should use the v1 API
+   * going forward.
+   * 
+ */ + public static final class BigQueryStorageBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryStorageBlockingV2Stub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryStorageBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, all the data in the
+     * table has been read.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the table in the format prescribed by the read session.
+     * Each response contains one or more table rows, up to a maximum of 10 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than this will fail.
+     * Each response also contains a set of stream statistics reflecting the
+     * estimated total number of rows in the read stream. This number is computed
+     * based on the total table size and the number of active streams in the read
+     * session, and may change as other streams continue to read data.
+     * 
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + ?, com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse> + readRows(com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates additional streams for a ReadSession. This API can be used to
+     * dynamically adjust the parallelism of a batch processing task upwards by
+     * adding additional workers.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + batchCreateReadSessionStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchCreateReadSessionStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Causes a single stream in a ReadSession to gracefully stop. This
+     * API can be used to dynamically adjust the parallelism of a batch processing
+     * task downwards without losing data.
+     * This API does not delete the stream -- it remains visible in the
+     * ReadSession, and any data processed by the stream is not released to other
+     * streams. However, no additional data will be assigned to the stream once
+     * this call completes. Callers must continue reading data on the stream until
+     * the end of the stream is reached so that data which has already been
+     * assigned to the stream will be processed.
+     * This method will return an error if there are no other live streams
+     * in the Session, or if SplitReadStream() has been called on the given
+     * Stream.
+     * 
+ */ + public com.google.protobuf.Empty finalizeStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getFinalizeStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given read stream into two Streams. These streams are referred to
+     * as the primary and the residual of the split. The original stream can still
+     * be read from in the same manner as before. Both of the returned streams can
+     * also be read from, and the total rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back to back in the
+     * original Stream. Concretely, it is guaranteed that for streams Original,
+     * Primary, and Residual, Original[0-j] = Primary[0-j] and
+     * Original[j-n] = Residual[0-m] once the streams have been read to
+     * completion.
+     * This method is guaranteed to be idempotent.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + splitReadStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service BigQueryStorage. + * + *
+   * BigQuery storage API.
+   * The BigQuery storage API can be used to read data stored in BigQuery.
+   * The v1beta1 API is not yet officially deprecated, and will go through a full
+   * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+   * before the service is turned down. However, new code should use the v1 API
+   * going forward.
+   * 
+ */ + public static final class BigQueryStorageBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryStorageBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryStorageBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, all the data in the
+     * table has been read.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
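+     * <p>A minimal blocking sketch ({@code channel} is assumed to exist; imports of the
+     * {@code Storage} and {@code TableReferenceProto} outer classes are assumed, and the
+     * field names follow the v1beta1 proto):
+     * <pre>{@code
+     * BigQueryStorageGrpc.BigQueryStorageBlockingStub stub =
+     *     BigQueryStorageGrpc.newBlockingStub(channel);
+     * Storage.ReadSession session =
+     *     stub.createReadSession(
+     *         Storage.CreateReadSessionRequest.newBuilder()
+     *             .setParent("projects/my-project")
+     *             .setTableReference(
+     *                 TableReferenceProto.TableReference.newBuilder()
+     *                     .setProjectId("my-project")
+     *                     .setDatasetId("my_dataset")
+     *                     .setTableId("my_table"))
+     *             .setRequestedStreams(4)
+     *             .build());
+     * }</pre>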
+ */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the table in the format prescribed by the read session.
+     * Each response contains one or more table rows, up to a maximum of 10 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than this will fail.
+     * Each response also contains a set of stream statistics reflecting the
+     * estimated total number of rows in the read stream. This number is computed
+     * based on the total table size and the number of active streams in the read
+     * session, and may change as other streams continue to read data.
+     * 
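+     * <p>A minimal sketch of draining one stream with the returned iterator ({@code stub}
+     * and {@code session} are assumed to come from a prior createReadSession call):
+     * <pre>{@code
+     * java.util.Iterator<Storage.ReadRowsResponse> rows =
+     *     stub.readRows(
+     *         Storage.ReadRowsRequest.newBuilder()
+     *             .setReadPosition(
+     *                 Storage.StreamPosition.newBuilder().setStream(session.getStreams(0)))
+     *             .build());
+     * while (rows.hasNext()) {
+     *   Storage.ReadRowsResponse response = rows.next();
+     *   // Decode the Avro (or Arrow) payload carried by this response.
+     * }
+     * }</pre>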
+ */ + public java.util.Iterator + readRows(com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates additional streams for a ReadSession. This API can be used to
+     * dynamically adjust the parallelism of a batch processing task upwards by
+     * adding additional workers.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + batchCreateReadSessionStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCreateReadSessionStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Causes a single stream in a ReadSession to gracefully stop. This
+     * API can be used to dynamically adjust the parallelism of a batch processing
+     * task downwards without losing data.
+     * This API does not delete the stream -- it remains visible in the
+     * ReadSession, and any data processed by the stream is not released to other
+     * streams. However, no additional data will be assigned to the stream once
+     * this call completes. Callers must continue reading data on the stream until
+     * the end of the stream is reached so that data which has already been
+     * assigned to the stream will be processed.
+     * This method will return an error if there are no other live streams
+     * in the Session, or if SplitReadStream() has been called on the given
+     * Stream.
+     * 
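+     * <p>A minimal sketch ({@code stub} and {@code session} are assumed; note that the
+     * stream must still be read to the end after this call):
+     * <pre>{@code
+     * stub.finalizeStream(
+     *     Storage.FinalizeStreamRequest.newBuilder()
+     *         .setStream(session.getStreams(0))
+     *         .build());
+     * // Continue draining the stream's iterator so already-assigned rows are processed.
+     * }</pre>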
+ */ + public com.google.protobuf.Empty finalizeStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFinalizeStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given read stream into two Streams. These streams are referred to
+     * as the primary and the residual of the split. The original stream can still
+     * be read from in the same manner as before. Both of the returned streams can
+     * also be read from, and the total rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back to back in the
+     * original Stream. Concretely, it is guaranteed that for streams Original,
+     * Primary, and Residual, Original[0-j] = Primary[0-j] and
+     * Original[j-n] = Residual[0-m] once the streams have been read to
+     * completion.
+     * This method is guaranteed to be idempotent.
+     * 
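+     * <p>A minimal sketch ({@code stub} and {@code session} are assumed; field names
+     * follow the v1beta1 proto):
+     * <pre>{@code
+     * Storage.SplitReadStreamResponse split =
+     *     stub.splitReadStream(
+     *         Storage.SplitReadStreamRequest.newBuilder()
+     *             .setOriginalStream(session.getStreams(0))
+     *             .build());
+     * Storage.Stream primary = split.getPrimaryStream();
+     * Storage.Stream remainder = split.getRemainderStream();
+     * // Each child stream can now be handed to a separate worker and read independently.
+     * }</pre>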
+ */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + splitReadStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service BigQueryStorage. + * + *
+   * BigQuery storage API.
+   * The BigQuery storage API can be used to read data stored in BigQuery.
+   * The v1beta1 API is not yet officially deprecated, and will go through a full
+   * deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+   * before the service is turned down. However, new code should use the v1 API
+   * going forward.
+   * 
+ */ + public static final class BigQueryStorageFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BigQueryStorageFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryStorageFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryStorageFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
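+     *
+     * 

A future-stub sketch (hypothetical; assumes {@code channel} and a fully + * populated {@code request}, and uses Guava's {@code Futures} helpers):

+     *
+     * 
{@code
+     * ListenableFuture future =
+     *     BigQueryStorageGrpc.newFutureStub(channel).createReadSession(request);
+     * Futures.addCallback(
+     *     future,
+     *     new FutureCallback() {
+     *       public void onSuccess(Storage.ReadSession session) {
+     *         // session.getStreamsList() holds the streams to read
+     *       }
+     *       public void onFailure(Throwable t) {
+     *         // handle the RPC failure
+     *       }
+     *     },
+     *     MoreExecutors.directExecutor());
+     * }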
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession> + createReadSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Creates additional streams for a ReadSession. This API can be used to
+     * dynamically adjust the parallelism of a batch processing task upwards by
+     * adding additional workers.
+     * 
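+     *
+     * 

A sketch of scaling up mid-job (hypothetical; assumes {@code channel} + * and an existing {@code Storage.ReadSession} named {@code session}):

+     *
+     * 
{@code
+     * ListenableFuture resp =
+     *     BigQueryStorageGrpc.newFutureStub(channel)
+     *         .batchCreateReadSessionStreams(
+     *             Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
+     *                 .setSession(session)
+     *                 .setRequestedStreams(2) // two additional workers
+     *                 .build());
+     * }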
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse> + batchCreateReadSessionStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCreateReadSessionStreamsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
+     * Causes a single stream in a ReadSession to gracefully stop. This
+     * API can be used to dynamically adjust the parallelism of a batch processing
+     * task downwards without losing data.
+     * This API does not delete the stream -- it remains visible in the
+     * ReadSession, and any data processed by the stream is not released to other
+     * streams. However, no additional data will be assigned to the stream once
+     * this call completes. Callers must continue reading data on the stream until
+     * the end of the stream is reached so that data which has already been
+     * assigned to the stream will be processed.
+     * This method will return an error if there are no other live streams
+     * in the Session, or if SplitReadStream() has been called on the given
+     * Stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + finalizeStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFinalizeStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Splits a given read stream into two Streams. These streams are referred to
+     * as the primary and the residual of the split. The original stream can still
+     * be read from in the same manner as before. Both of the returned streams can
 + * also be read from, and the total rows returned by both child streams will + * be the same as the rows read from the original stream. + * Moreover, the two child streams will be allocated back-to-back in the 
+     * original Stream. Concretely, it is guaranteed that for streams Original,
+     * Primary, and Residual, that Original[0-j] = Primary[0-j] and
+     * Original[j-n] = Residual[0-m] once the streams have been read to
+     * completion.
+     * This method is guaranteed to be idempotent.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse> + splitReadStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_READ_SESSION = 0; + private static final int METHODID_READ_ROWS = 1; + private static final int METHODID_BATCH_CREATE_READ_SESSION_STREAMS = 2; + private static final int METHODID_FINALIZE_STREAM = 3; + private static final int METHODID_SPLIT_READ_STREAM = 4; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_READ_SESSION: + serviceImpl.createReadSession( + (com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession>) + responseObserver); + break; + case METHODID_READ_ROWS: + serviceImpl.readRows( + (com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse>) + responseObserver); + break; + case METHODID_BATCH_CREATE_READ_SESSION_STREAMS: + serviceImpl.batchCreateReadSessionStreams( + (com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse>) + responseObserver); + break; + case METHODID_FINALIZE_STREAM: + serviceImpl.finalizeStream( + (com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_SPLIT_READ_STREAM: + serviceImpl.splitReadStream( + (com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateReadSessionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession>( + service, METHODID_CREATE_READ_SESSION))) + .addMethod( + getReadRowsMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( 
+ new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse>( + service, METHODID_READ_ROWS))) + .addMethod( + getBatchCreateReadSessionStreamsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse>( + service, METHODID_BATCH_CREATE_READ_SESSION_STREAMS))) + .addMethod( + getFinalizeStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest, + com.google.protobuf.Empty>(service, METHODID_FINALIZE_STREAM))) + .addMethod( + getSplitReadStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse>( + service, METHODID_SPLIT_READ_STREAM))) + .build(); + } + + private abstract static class BigQueryStorageBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryStorageBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryStorage"); + } + } + + private static final class BigQueryStorageFileDescriptorSupplier + extends BigQueryStorageBaseDescriptorSupplier { + BigQueryStorageFileDescriptorSupplier() {} + } + + private static final class BigQueryStorageMethodDescriptorSupplier + extends BigQueryStorageBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + BigQueryStorageMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryStorageGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryStorageFileDescriptorSupplier()) + .addMethod(getCreateReadSessionMethod()) + .addMethod(getReadRowsMethod()) + .addMethod(getBatchCreateReadSessionStreamsMethod()) + .addMethod(getFinalizeStreamMethod()) + .addMethod(getSplitReadStreamMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml new file mode 100644 index 000000000000..0e9d698489e3 --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -0,0 +1,50 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + 0.191.1 + 
grpc-google-cloud-bigquerystorage-v1beta2 + GRPC library for grpc-google-cloud-bigquerystorage-v1beta2 + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + + + com.google.guava + guava + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java new file mode 100644 index 000000000000..ae0f0e53678a --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java @@ -0,0 +1,847 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * BigQuery Read API.
+ * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+ * 
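+ *
+ * 

A minimal client-construction sketch (hypothetical; credential and + * call-option setup are omitted, and production code would attach Google + * call credentials to the channel or stub):

+ *
+ * 
{@code
+ * ManagedChannel channel =
+ *     ManagedChannelBuilder.forTarget("bigquerystorage.googleapis.com:443").build();
+ * BigQueryReadGrpc.BigQueryReadBlockingStub stub =
+ *     BigQueryReadGrpc.newBlockingStub(channel);
+ * }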
+ */ +@io.grpc.stub.annotations.GrpcGenerated +public final class BigQueryReadGrpc { + + private BigQueryReadGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1beta2.BigQueryRead"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + getCreateReadSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateReadSession", + requestType = com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.ReadSession.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + getCreateReadSessionMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + getCreateReadSessionMethod; + if ((getCreateReadSessionMethod = BigQueryReadGrpc.getCreateReadSessionMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getCreateReadSessionMethod = BigQueryReadGrpc.getCreateReadSessionMethod) == null) { + BigQueryReadGrpc.getCreateReadSessionMethod = + getCreateReadSessionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateReadSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.ReadSession + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryReadMethodDescriptorSupplier("CreateReadSession")) + .build(); + } + } + } + return getCreateReadSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + getReadRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ReadRows", + requestType = com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + getReadRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + getReadRowsMethod; + if ((getReadRowsMethod = BigQueryReadGrpc.getReadRowsMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getReadRowsMethod = BigQueryReadGrpc.getReadRowsMethod) == null) { + BigQueryReadGrpc.getReadRowsMethod = + getReadRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ReadRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryReadMethodDescriptorSupplier("ReadRows")) + .build(); + } + } + } + return getReadRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + getSplitReadStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SplitReadStream", + requestType = com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + getSplitReadStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + getSplitReadStreamMethod; + if ((getSplitReadStreamMethod = BigQueryReadGrpc.getSplitReadStreamMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getSplitReadStreamMethod = BigQueryReadGrpc.getSplitReadStreamMethod) == null) { + BigQueryReadGrpc.getSplitReadStreamMethod = + getSplitReadStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SplitReadStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryReadMethodDescriptorSupplier("SplitReadStream")) + .build(); + } + } + } + return getSplitReadStreamMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryReadStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadStub(channel, callOptions); + } + }; + return BigQueryReadStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static BigQueryReadBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingV2Stub(channel, callOptions); + } + }; + return BigQueryReadBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryReadBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingStub(channel, callOptions); + } + }; + return BigQueryReadBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryReadFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryReadFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadFutureStub(channel, callOptions); + } + }; + return BigQueryReadFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+   * 
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
 + * each stream will return the same number of rows. Additionally, the 
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
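+     *
+     * 

A server-side sketch (hypothetical; e.g. a test fake extending the + * generated base class, with error handling elided):

+     *
+     * 
{@code
+     * class FakeBigQueryRead extends BigQueryReadGrpc.BigQueryReadImplBase {
+     *   public void createReadSession(
+     *       CreateReadSessionRequest request,
+     *       StreamObserver responseObserver) {
+     *     responseObserver.onNext(ReadSession.getDefaultInstance());
+     *     responseObserver.onCompleted();
+     *   }
+     * }
+     * }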
+ */ + default void createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateReadSessionMethod(), responseObserver); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
+ */ + default void readRows( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, that original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + default void splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSplitReadStreamMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+   * 
+ */ + public abstract static class BigQueryReadImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return BigQueryReadGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+   * 
+ */ + public static final class BigQueryReadStub + extends io.grpc.stub.AbstractAsyncStub { + private BigQueryReadStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
 + * each stream will return the same number of rows. Additionally, the 
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
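+     *
+     * 

An async-stub sketch (hypothetical; assumes {@code channel} and a + * populated {@code request}):

+     *
+     * 
{@code
+     * BigQueryReadGrpc.newStub(channel)
+     *     .createReadSession(request, new StreamObserver() {
+     *       public void onNext(ReadSession session) {
+     *         // session.getStreamsList() holds the streams to read
+     *       }
+     *       public void onError(Throwable t) {
+     *         // inspect io.grpc.Status.fromThrowable(t)
+     *       }
+     *       public void onCompleted() {}
+     *     });
+     * }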
+ */ + public void createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
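+     *
+     * 

A server-streaming sketch (hypothetical; assumes {@code channel} and a + * {@code ReadRowsRequest} naming an open read stream):

+     *
+     * 
{@code
+     * BigQueryReadGrpc.newStub(channel)
+     *     .readRows(request, new StreamObserver() {
+     *       public void onNext(ReadRowsResponse response) {
+     *         // each response carries up to ~100 MiB of rows plus stream stats
+     *       }
+     *       public void onError(Throwable t) {}
+     *       public void onCompleted() {}
+     *     });
+     * }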
+ */ + public void readRows( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getReadRowsMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, that original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public void splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+   * 
+ */ + public static final class BigQueryReadBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryReadBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
 + * each stream will return the same number of rows. Additionally, the 
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
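+     *
+     * 

A sketch against grpc-java's experimental {@code BlockingClientCall} + * API (hypothetical, and the API referenced by the issue above is still + * subject to change; both calls below block and may throw + * {@code StatusException} or {@code InterruptedException}):

+     *
+     * 
{@code
+     * BlockingClientCall call =
+     *     BigQueryReadGrpc.newBlockingV2Stub(channel).readRows(request);
+     * while (call.hasNext()) {
+     *   ReadRowsResponse response = call.read();
+     *   // process response
+     * }
+     * }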
+ */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + ?, com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + readRows(com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, that original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+   * 
+ */ + public static final class BigQueryReadBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryReadBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
 + * each stream will return the same number of rows. Additionally, the 
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
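+     *
+     * 

A blocking iteration sketch (hypothetical; assumes {@code channel} and + * a populated {@code request}; the iterator blocks until each response + * arrives):

+     *
+     * 
{@code
+     * java.util.Iterator rows =
+     *     BigQueryReadGrpc.newBlockingStub(channel).readRows(request);
+     * long totalRows = 0;
+     * while (rows.hasNext()) {
+     *   totalRows += rows.next().getRowCount(); // row_count from the response
+     * }
+     * }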
+ */ + public java.util.Iterator readRows( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, that original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service BigQueryRead. + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
 + * New code should use the v1 Read API going forward if it does not also use + * the Write API at the same time. 
+   * 
+ */ + public static final class BigQueryReadFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BigQueryReadFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
 + * each stream will return the same number of rows. Additionally, the 
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 6 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, that original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + splitReadStream(com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_READ_SESSION = 0; + private static final int METHODID_READ_ROWS = 1; + private static final int METHODID_SPLIT_READ_STREAM = 2; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_READ_SESSION: + serviceImpl.createReadSession( + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_READ_ROWS: + serviceImpl.readRows( + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse>) + responseObserver); + break; + case METHODID_SPLIT_READ_STREAM: + serviceImpl.splitReadStream( + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateReadSessionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession>( + service, METHODID_CREATE_READ_SESSION))) + .addMethod( + getReadRowsMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse>( + service, METHODID_READ_ROWS))) + .addMethod( + getSplitReadStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse>( + service, METHODID_SPLIT_READ_STREAM))) + .build(); + } + + private abstract static class BigQueryReadBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryReadBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return 
com.google.cloud.bigquery.storage.v1beta2.StorageProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryRead"); + } + } + + private static final class BigQueryReadFileDescriptorSupplier + extends BigQueryReadBaseDescriptorSupplier { + BigQueryReadFileDescriptorSupplier() {} + } + + private static final class BigQueryReadMethodDescriptorSupplier + extends BigQueryReadBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + BigQueryReadMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryReadGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryReadFileDescriptorSupplier()) + .addMethod(getCreateReadSessionMethod()) + .addMethod(getReadRowsMethod()) + .addMethod(getSplitReadStreamMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java new file mode 100644 index 000000000000..62668225f70c --- /dev/null +++ b/java-bigquerystorage/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java @@ -0,0 +1,1271 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
+ * BigQuery Write API.
+ * The Write API can be used to write data to BigQuery.
+ * The [google.cloud.bigquery.storage.v1
+ *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+ *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+ * 
+ */ +@io.grpc.stub.annotations.GrpcGenerated +@java.lang.Deprecated +public final class BigQueryWriteGrpc { + + private BigQueryWriteGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getCreateWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateWriteStream", + requestType = com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getCreateWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getCreateWriteStreamMethod; + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + BigQueryWriteGrpc.getCreateWriteStreamMethod = + getCreateWriteStreamMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("CreateWriteStream")) + .build(); + } + } + } + return getCreateWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse> + getAppendRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "AppendRows", + requestType = com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse> + getAppendRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse> + getAppendRowsMethod; + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + BigQueryWriteGrpc.getAppendRowsMethod = + getAppendRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "AppendRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("AppendRows")) + .build(); + } + } + } + return getAppendRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getGetWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetWriteStream", + requestType = com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getGetWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getGetWriteStreamMethod; + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + BigQueryWriteGrpc.getGetWriteStreamMethod = + getGetWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("GetWriteStream")) + .build(); + } + } + } + return getGetWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FinalizeWriteStream", + requestType = com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) + == null) { + BigQueryWriteGrpc.getFinalizeWriteStreamMethod = + getFinalizeWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "FinalizeWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("FinalizeWriteStream")) + .build(); + } + } + } + return getFinalizeWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCommitWriteStreams", + requestType = com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod = + getBatchCommitWriteStreamsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCommitWriteStreams")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2 + .BatchCommitWriteStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2 + .BatchCommitWriteStreamsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("BatchCommitWriteStreams")) + .build(); + } + } + } + return getBatchCommitWriteStreamsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse> + getFlushRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FlushRows", + requestType = com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse> + getFlushRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse> + getFlushRowsMethod; + if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { + BigQueryWriteGrpc.getFlushRowsMethod = + getFlushRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FlushRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("FlushRows")) + .build(); + } + } + } + return getFlushRowsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryWriteStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + }; + return BigQueryWriteStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static BigQueryWriteBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingV2Stub(channel, callOptions); + } + }; + return BigQueryWriteBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryWriteBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + }; + return BigQueryWriteBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryWriteFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public BigQueryWriteFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + }; + return BigQueryWriteFutureStub.newStub(factory, channel); + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * The [google.cloud.bigquery.storage.v1
+   *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+   *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+   * 
+ */ + @java.lang.Deprecated + public interface AsyncService { + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
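+     * For illustration only (channel and the parent path are placeholders,
+     * not part of the generated contract), a PENDING stream could be created
+     * through the blocking stub like this:
+     *   BigQueryWriteGrpc.BigQueryWriteBlockingStub stub =
+     *       BigQueryWriteGrpc.newBlockingStub(channel);
+     *   WriteStream stream = stub.createWriteStream(
+     *       CreateWriteStreamRequest.newBuilder()
+     *           .setParent("projects/p/datasets/d/tables/t")
+     *           .setWriteStream(
+     *               WriteStream.newBuilder().setType(WriteStream.Type.PENDING))
+     *           .build());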
+ */ + @java.lang.Deprecated + default void createWriteStream( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of
+     * the stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if
+     * an attempt is made to append to an offset beyond the current end of the
+     * stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+     * already been written to. The user can retry with an adjusted offset
+     * within the same RPC stream. If `offset` is not specified, the append
+     * happens at the end of the stream.
+     * The response contains the offset at which the append happened. Responses
+     * are received in the same order in which requests are sent. There will be
+     * one response for each successful request. If the `offset` is not set in
+     * the response, it means the append did not happen due to some error. If
+     * one request fails, all subsequent requests will also fail until a
+     * successful request is made again.
+     * If the stream is of `PENDING` type, data will only be available for read
+     * operations after the stream is committed.
+     * 
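+     * A rough sketch (responseObserver, streamName, nextOffset and protoData
+     * are placeholders): open the bidi stream, then send explicit-offset
+     * appends on it:
+     *   StreamObserver<AppendRowsRequest> requests =
+     *       stub.appendRows(responseObserver);
+     *   requests.onNext(AppendRowsRequest.newBuilder()
+     *       .setWriteStream(streamName)
+     *       .setOffset(Int64Value.of(nextOffset))
+     *       .setProtoRows(protoData)
+     *       .build());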
+ */ + @java.lang.Deprecated + default io.grpc.stub.StreamObserver + appendRows( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse> + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getAppendRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + @java.lang.Deprecated + default void getWriteStream( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + @java.lang.Deprecated + default void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getFinalizeWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
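+     * A hedged sketch (stub, streamName and tableName are placeholders):
+     * finalize the PENDING stream first, then commit it:
+     *   stub.finalizeWriteStream(
+     *       FinalizeWriteStreamRequest.newBuilder().setName(streamName).build());
+     *   BatchCommitWriteStreamsResponse committed = stub.batchCommitWriteStreams(
+     *       BatchCommitWriteStreamsRequest.newBuilder()
+     *           .setParent(tableName)
+     *           .addWriteStreams(streamName)
+     *           .build());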
+ */ + @java.lang.Deprecated + default void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCommitWriteStreamsMethod(), responseObserver); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * Flush operation flushes up to any previously flushed offset in a BUFFERED
+     * stream, to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
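+     * Illustrative sketch only (stub, streamName and targetOffset are
+     * placeholders):
+     *   FlushRowsResponse flushed = stub.flushRows(
+     *       FlushRowsRequest.newBuilder()
+     *           .setWriteStream(streamName)
+     *           .setOffset(Int64Value.of(targetOffset))
+     *           .build());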
+ */ + @java.lang.Deprecated + default void flushRows( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getFlushRowsMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * The [google.cloud.bigquery.storage.v1
+   *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+   *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+   * 
+ */ + @java.lang.Deprecated + public abstract static class BigQueryWriteImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return BigQueryWriteGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * The [google.cloud.bigquery.storage.v1
+   *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+   *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+   * 
+ */ + @java.lang.Deprecated + public static final class BigQueryWriteStub + extends io.grpc.stub.AbstractAsyncStub { + private BigQueryWriteStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
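+     * An asynchronous sketch (request, handleStream and handleError are
+     * placeholders):
+     *   stub.createWriteStream(request, new StreamObserver<WriteStream>() {
+     *     public void onNext(WriteStream stream) { handleStream(stream); }
+     *     public void onError(Throwable t) { handleError(t); }
+     *     public void onCompleted() {}
+     *   });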
+ */ + @java.lang.Deprecated + public void createWriteStream( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of
+     * the stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if
+     * an attempt is made to append to an offset beyond the current end of the
+     * stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+     * already been written to. The user can retry with an adjusted offset
+     * within the same RPC stream. If `offset` is not specified, the append
+     * happens at the end of the stream.
+     * The response contains the offset at which the append happened. Responses
+     * are received in the same order in which requests are sent. There will be
+     * one response for each successful request. If the `offset` is not set in
+     * the response, it means the append did not happen due to some error. If
+     * one request fails, all subsequent requests will also fail until a
+     * successful request is made again.
+     * If the stream is of `PENDING` type, data will only be available for read
+     * operations after the stream is committed.
+     * 
+ */ + @java.lang.Deprecated + public io.grpc.stub.StreamObserver + appendRows( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse> + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getAppendRowsMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + @java.lang.Deprecated + public void getWriteStream( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + @java.lang.Deprecated + public void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + @java.lang.Deprecated + public void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * Flush operation flushes up to any previously flushed offset in a BUFFERED
+     * stream, to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + @java.lang.Deprecated + public void flushRows( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * The [google.cloud.bigquery.storage.v1
+   *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+   *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+   * 
+ */ + @java.lang.Deprecated + public static final class BigQueryWriteBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryWriteBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.WriteStream createWriteStream( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of
+     * the stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if
+     * an attempt is made to append to an offset beyond the current end of the
+     * stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+     * already been written to. The user can retry with an adjusted offset
+     * within the same RPC stream. If `offset` is not specified, the append
+     * happens at the end of the stream.
+     * The response contains the offset at which the append happened. Responses
+     * are received in the same order in which requests are sent. There will be
+     * one response for each successful request. If the `offset` is not set in
+     * the response, it means the append did not happen due to some error. If
+     * one request fails, all subsequent requests will also fail until a
+     * successful request is made again.
+     * If the stream is of `PENDING` type, data will only be available for read
+     * operations after the stream is committed.
+     * 
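+     * Sketch only; BlockingClientCall is experimental in grpc-java, and the
+     * write/read calls below are assumed from that surface (request is a
+     * placeholder):
+     *   BlockingClientCall<AppendRowsRequest, AppendRowsResponse> call =
+     *       stub.appendRows();
+     *   call.write(request);
+     *   AppendRowsResponse response = call.read();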
+ */ + @java.lang.Deprecated + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse> + appendRows() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getAppendRowsMethod(), getCallOptions()); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.WriteStream getWriteStream( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * Flush operation flushes up to any previously flushed offset in a BUFFERED
+     * stream, to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse flushRows( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getFlushRowsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * The [google.cloud.bigquery.storage.v1
+   *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+   *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+   * 
+ */ + @java.lang.Deprecated + public static final class BigQueryWriteBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private BigQueryWriteBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.WriteStream createWriteStream( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.WriteStream getWriteStream( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * Flush operation flushes up to any previously flushed offset in a BUFFERED
+     * stream, to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + @java.lang.Deprecated + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse flushRows( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getFlushRowsMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service BigQueryWrite. + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * The [google.cloud.bigquery.storage.v1
+   *   API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+   *   should be used instead of the v1beta2 API for BigQueryWrite operations.
+   * 
+ */ + @java.lang.Deprecated + public static final class BigQueryWriteFutureStub + extends io.grpc.stub.AbstractFutureStub { + private BigQueryWriteFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
+     * 
+ */ + @java.lang.Deprecated + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + createWriteStream( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + @java.lang.Deprecated + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.WriteStream> + getWriteStream(com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Finalizes a write stream so that no new data can be appended to the
+     * stream. Finalize is not supported on the '_default' stream.
+     * 
+ */ + @java.lang.Deprecated + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse> + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + @java.lang.Deprecated + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse> + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Flushes rows to a BUFFERED stream.
+     * If users are appending rows to a BUFFERED stream, a flush operation is
+     * required in order for the rows to become available for reading. A
+     * Flush operation flushes up to any previously flushed offset in a BUFFERED
+     * stream, to the offset specified in the request.
+     * Flush is not supported on the _default stream, since it is not BUFFERED.
+     * 
+ */ + @java.lang.Deprecated + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse> + flushRows(com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_WRITE_STREAM = 0; + private static final int METHODID_GET_WRITE_STREAM = 1; + private static final int METHODID_FINALIZE_WRITE_STREAM = 2; + private static final int METHODID_BATCH_COMMIT_WRITE_STREAMS = 3; + private static final int METHODID_FLUSH_ROWS = 4; + private static final int METHODID_APPEND_ROWS = 5; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_WRITE_STREAM: + serviceImpl.createWriteStream( + (com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_WRITE_STREAM: + serviceImpl.getWriteStream( + (com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_FINALIZE_WRITE_STREAM: + serviceImpl.finalizeWriteStream( + (com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse>) + responseObserver); + break; + case METHODID_BATCH_COMMIT_WRITE_STREAMS: + serviceImpl.batchCommitWriteStreams( + (com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse>) + responseObserver); + break; + case METHODID_FLUSH_ROWS: + serviceImpl.flushRows( + (com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_APPEND_ROWS: + return (io.grpc.stub.StreamObserver) + serviceImpl.appendRows( + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream>( + service, METHODID_CREATE_WRITE_STREAM))) + 
.addMethod( + getAppendRowsMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse>( + service, METHODID_APPEND_ROWS))) + .addMethod( + getGetWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.WriteStream>( + service, METHODID_GET_WRITE_STREAM))) + .addMethod( + getFinalizeWriteStreamMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse>( + service, METHODID_FINALIZE_WRITE_STREAM))) + .addMethod( + getBatchCommitWriteStreamsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse>( + service, METHODID_BATCH_COMMIT_WRITE_STREAMS))) + .addMethod( + getFlushRowsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse>( + service, METHODID_FLUSH_ROWS))) + .build(); + } + + private abstract static class BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryWriteBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryWrite"); + } + } + + private static final class BigQueryWriteFileDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier { + BigQueryWriteFileDescriptorSupplier() {} + } + + private static final class BigQueryWriteMethodDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + BigQueryWriteMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryWriteGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryWriteFileDescriptorSupplier()) + .addMethod(getCreateWriteStreamMethod()) + .addMethod(getAppendRowsMethod()) + .addMethod(getGetWriteStreamMethod()) + .addMethod(getFinalizeWriteStreamMethod()) + .addMethod(getBatchCommitWriteStreamsMethod()) + .addMethod(getFlushRowsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-bigquerystorage/owlbot.py b/java-bigquerystorage/owlbot.py 
new file mode 100644 index 000000000000..36c466fb760d --- /dev/null +++ b/java-bigquerystorage/owlbot.py @@ -0,0 +1,35 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import synthtool as s +from synthtool.languages import java + +for library in s.get_staging_dirs(): + # put any special-case replacements here + s.move(library) + +s.remove_staging_dirs() +java.common_templates(monorepo=True, excludes=[ + ".github/*", + ".kokoro/*", + "samples/*", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.md", + "LICENSE", + "SECURITY.md", + "java.header", + "license-checks.xml", + "renovate.json", + ".gitignore" +]) diff --git a/java-bigquerystorage/pom.xml b/java-bigquerystorage/pom.xml new file mode 100644 index 000000000000..686c69c783b7 --- /dev/null +++ b/java-bigquerystorage/pom.xml @@ -0,0 +1,233 @@ + + + 4.0.0 + com.google.cloud + google-cloud-bigquerystorage-parent + pom + 3.19.1 + BigQuery Storage Parent + https://github.com/googleapis/google-cloud-java + + Java idiomatic client for Google Cloud Platform services. + + + + com.google.cloud + google-cloud-jar-parent + 1.77.0-SNAPSHOT + ../google-cloud-jar-parent/pom.xml + + + + + chingor + Jeff Ching + chingor@google.com + Google + + Developer + + + + + Google LLC + + + scm:git:git@github.com:googleapis/google-cloud-java.git + scm:git:git@github.com:googleapis/google-cloud-java.git + https://github.com/googleapis/google-cloud-java + HEAD + + + https://github.com/googleapis/google-cloud-java/issues + GitHub Issues + + + + + Apache-2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + + UTF-8 + UTF-8 + 3.25.4 + github + google-cloud-bigquerystorage-parent + + + + + + never + + + false + + central + Central Repository + https://repo.maven.apache.org/maven2 + + + + + + + com.google.cloud + google-cloud-shared-dependencies + ${google-cloud-shared-dependencies.version} + pom + import + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta + 3.19.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta + 3.19.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha + 3.19.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha + 3.19.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta1 + 0.191.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + 0.191.1 + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1 + 3.19.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta1 + 0.191.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + 0.191.1 + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1 + 3.19.1 + + + com.google.cloud + google-cloud-bigquerystorage + 3.19.1 + + + org.json + json + 20250517 + + + + junit + junit + 4.13.2 + test + + + com.google.cloud + google-cloud-bigquery + 2.57.1 + test + + + com.google.cloud + google-cloud-bigquerystorage + + + + + + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.7.0 + true + + ossrh + 
https://google.oss.sonatype.org/ + false + 15 + + + + org.apache.maven.plugins + maven-dependency-plugin + + + org.objenesis:objenesis + javax.annotation:javax.annotation-api + + org.junit.jupiter:junit-jupiter-engine + + org.junit.vintage:junit-vintage-engine + + + + + + + + org.codehaus.mojo + exec-maven-plugin + + true + + + + + + + google-cloud-bigquerystorage + grpc-google-cloud-bigquerystorage-v1beta1 + grpc-google-cloud-bigquerystorage-v1beta2 + grpc-google-cloud-bigquerystorage-v1 + grpc-google-cloud-bigquerystorage-v1alpha + grpc-google-cloud-bigquerystorage-v1beta + proto-google-cloud-bigquerystorage-v1beta1 + proto-google-cloud-bigquerystorage-v1beta2 + proto-google-cloud-bigquerystorage-v1 + proto-google-cloud-bigquerystorage-v1alpha + proto-google-cloud-bigquerystorage-v1beta + google-cloud-bigquerystorage-bom + + + + + include-samples + + samples + tutorials + + + + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/clirr-ignored-differences.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..bc949a507a34 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/clirr-ignored-differences.xml @@ -0,0 +1,81 @@ + + + + + 7012 + com/google/cloud/bigquery/storage/v1/*OrBuilder + * get*(*) + + + 7012 + com/google/cloud/bigquery/storage/v1/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/cloud/bigquery/storage/v1/*OrBuilder + boolean has*(*) + + + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * clear() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * clearField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * clearOneof(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * clone() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * setField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1/** + * setUnknownFields(*) + ** + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/pom.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/pom.xml new file mode 100644 index 000000000000..3638851b4a8b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/pom.xml @@ -0,0 +1,42 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1 + 3.19.1 + proto-google-cloud-bigquerystorage-v1 + PROTO library for proto-google-cloud-bigquerystorage-v1 + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java new file mode 100644 index 000000000000..d3d9668257ce --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/annotations.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class AnnotationsProto { + private AnnotationsProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) { + registry.add(com.google.cloud.bigquery.storage.v1.AnnotationsProto.columnName); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public static final int COLUMN_NAME_FIELD_NUMBER = 454943157; + + /** + * + * + *
+   * Setting the column_name extension allows users to reference a
+   * BigQuery column independently of the field name in the protocol buffer
+   * message.
+   *
+   * The intended use of this annotation is to reference a destination column
+   * named using characters unavailable for protobuf field names (e.g. unicode
+   * characters).
+   *
+   * More details about BigQuery naming limitations can be found here:
+   * https://cloud.google.com/bigquery/docs/schemas#column_names
+   *
+   * This extension is currently experimental.
+   * 
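+   *
+   * A small hypothetical sketch: the column name can be read back from a
+   * field descriptor's options at runtime (getAnnotatedField() is a
+   * placeholder for obtaining some annotated field):
+   *   com.google.protobuf.Descriptors.FieldDescriptor field = getAnnotatedField();
+   *   String column =
+   *       field.getOptions().getExtension(AnnotationsProto.columnName);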
+ * + * extend .google.protobuf.FieldOptions { ... } + */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, java.lang.String> + columnName = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + java.lang.String.class, null); + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n2google/cloud/bigquery/storage/v1/annot" + + "ations.proto\022 google.cloud.bigquery.stor" + + "age.v1\032 google/protobuf/descriptor.proto" + + ":9\n\013column_name\022\035.google.protobuf.FieldO" + + "ptions\030\265\303\367\330\001 \001(\t\210\001\001B\300\001\n$com.google.cloud" + + ".bigquery.storage.v1B\020AnnotationsProtoP\001" + + "Z>cloud.google.com/go/bigquery/storage/a" + + "piv1/storagepb;storagepb\252\002 Google.Cloud." + + "BigQuery.Storage.V1\312\002 Google\\Cloud\\BigQu" + + "ery\\Storage\\V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.DescriptorProtos.getDescriptor(), + }); + columnName.internalInit(descriptor.getExtensions().get(0)); + com.google.protobuf.DescriptorProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java new file mode 100644 index 000000000000..8092298eba51 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java @@ -0,0 +1,5868 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `AppendRows`.
+ *
+ * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+ * AppendRowsRequest need only be specified for the first request before
+ * switching table destinations. You can also switch table destinations within
+ * the same connection for the default stream.
+ *
+ * A single AppendRowsRequest must be less than 10 MB in size.
+ * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
+ * 
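+ *
+ * For instance (a hedged client-side guard, not an official check; request is
+ * a placeholder and the exact byte limit is assumed from the text above), a
+ * caller can compare the serialized size against a local constant:
+ *   long maxBytes = 10L * 1000 * 1000;
+ *   if (request.getSerializedSize() > maxBytes) {
+ *     // split the rows across multiple requests instead of sending this one
+ *   }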
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest} + */ +public final class AppendRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest) + AppendRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AppendRowsRequest.newBuilder() to construct. + private AppendRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsRequest() { + writeStream_ = ""; + traceId_ = ""; + defaultMissingValueInterpretation_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 7: + return internalGetMissingValueInterpretations(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.Builder.class); + } + + /** + * + * + *
+   * An enum to indicate how to interpret missing values of fields that are
+   * present in the user schema but missing in rows. A missing value can
+   * represent a NULL or a column default value defined in the BigQuery table
+   * schema.
+   * 
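+   * A minimal sketch, assuming the builder setter that protoc generates for
+   * the request's default_missing_value_interpretation field:
+   *   AppendRowsRequest request = AppendRowsRequest.newBuilder()
+   *       .setDefaultMissingValueInterpretation(
+   *           AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+   *       .build();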
+ * + * Protobuf enum {@code + * google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation} + */ + public enum MissingValueInterpretation implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Invalid missing value interpretation. Requests with this value will be
+     * rejected.
+     * 
+ * + * MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0; + */ + MISSING_VALUE_INTERPRETATION_UNSPECIFIED(0), + /** + * + * + *
+     * Missing value is interpreted as NULL.
+     * 
+ * + * NULL_VALUE = 1; + */ + NULL_VALUE(1), + /** + * + * + *
+     * Missing value is interpreted as column default value if declared in the
+     * table schema, NULL otherwise.
+     * 
+ * + * DEFAULT_VALUE = 2; + */ + DEFAULT_VALUE(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Invalid missing value interpretation. Requests with this value will be
+     * rejected.
+     * 
+ * + * MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0; + */ + public static final int MISSING_VALUE_INTERPRETATION_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Missing value is interpreted as NULL.
+     * 
+ * + * NULL_VALUE = 1; + */ + public static final int NULL_VALUE_VALUE = 1; + + /** + * + * + *
+     * Missing value is interpreted as column default value if declared in the
+     * table schema, NULL otherwise.
+     * 
+ * + * DEFAULT_VALUE = 2; + */ + public static final int DEFAULT_VALUE_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static MissingValueInterpretation valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static MissingValueInterpretation forNumber(int value) { + switch (value) { + case 0: + return MISSING_VALUE_INTERPRETATION_UNSPECIFIED; + case 1: + return NULL_VALUE; + case 2: + return DEFAULT_VALUE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public MissingValueInterpretation findValueByNumber(int number) { + return MissingValueInterpretation.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final MissingValueInterpretation[] VALUES = values(); + + public static MissingValueInterpretation valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private MissingValueInterpretation(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation) + } + + public interface ArrowDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. Arrow Schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + boolean hasWriterSchema(); + + /** + * + * + *
+     * Optional. Arrow Schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + * + * @return The writerSchema. + */ + com.google.cloud.bigquery.storage.v1.ArrowSchema getWriterSchema(); + + /** + * + * + *
+     * Optional. Arrow Schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getWriterSchemaOrBuilder(); + + /** + * + * + *
+     * Required. Serialized row data in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + * + * @return Whether the rows field is set. + */ + boolean hasRows(); + + /** + * + * + *
+     * Required. Serialized row data in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + * + * @return The rows. + */ + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getRows(); + + /** + * + * + *
+     * Required. Serialized row data in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder getRowsOrBuilder(); + } + + /** + * + * + *
+   * Arrow schema and data.
+   * 
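+   *
+   * Sketch only (arrowSchema and recordBatch are placeholders): the first
+   * request on a connection usually carries the writer schema, and later
+   * requests can carry rows alone:
+   *   AppendRowsRequest.ArrowData data =
+   *       AppendRowsRequest.ArrowData.newBuilder()
+   *           .setWriterSchema(arrowSchema)
+   *           .setRows(recordBatch)
+   *           .build();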
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData} + */ + public static final class ArrowData extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) + ArrowDataOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowData.newBuilder() to construct. + private ArrowData(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowData() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowData(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder.class); + } + + private int bitField0_; + public static final int WRITER_SCHEMA_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1.ArrowSchema writerSchema_; + + /** + * + * + *
+     * Optional. Arrow Schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + @java.lang.Override + public boolean hasWriterSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Arrow Schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + * + * @return The writerSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getWriterSchema() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance() + : writerSchema_; + } + + /** + * + * + *
+     * Optional. Arrow Schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getWriterSchemaOrBuilder() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance() + : writerSchema_; + } + + public static final int ROWS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.ArrowRecordBatch rows_; + + /** + * + * + *
+     * Required. Serialized row data in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + * + * @return Whether the rows field is set. + */ + @java.lang.Override + public boolean hasRows() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Serialized row data in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + * + * @return The rows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getRows() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance() + : rows_; + } + + /** + * + * + *
+     * Required. Serialized row data in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder getRowsOrBuilder() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance() + : rows_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getWriterSchema()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getRows()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriterSchema()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRows()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData other = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) obj; + + if (hasWriterSchema() != other.hasWriterSchema()) return false; + if (hasWriterSchema()) { + if (!getWriterSchema().equals(other.getWriterSchema())) return false; + } + if (hasRows() != other.hasRows()) return false; + if (hasRows()) { + if (!getRows().equals(other.getRows())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriterSchema()) { + hash = (37 * hash) + WRITER_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getWriterSchema().hashCode(); + } + if (hasRows()) { + hash = (37 * hash) + ROWS_FIELD_NUMBER; + hash = (53 * hash) + getRows().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Arrow schema and data.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getWriterSchemaFieldBuilder(); + getRowsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writerSchema_ = null; + if (writerSchemaBuilder_ != null) { + writerSchemaBuilder_.dispose(); + writerSchemaBuilder_ = null; + } + rows_ = null; + if (rowsBuilder_ != null) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData build() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData result = + new com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writerSchema_ = + writerSchemaBuilder_ == null ? writerSchema_ : writerSchemaBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rows_ = rowsBuilder_ == null ? 
rows_ : rowsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData other) { + if (other + == com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + .getDefaultInstance()) return this; + if (other.hasWriterSchema()) { + mergeWriterSchema(other.getWriterSchema()); + } + if (other.hasRows()) { + mergeRows(other.getRows()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getWriterSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getRowsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1.ArrowSchema writerSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder> + writerSchemaBuilder_; + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + public boolean hasWriterSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + * + * @return The writerSchema. + */ + public com.google.cloud.bigquery.storage.v1.ArrowSchema getWriterSchema() { + if (writerSchemaBuilder_ == null) { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance() + : writerSchema_; + } else { + return writerSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ArrowSchema value) { + if (writerSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writerSchema_ = value; + } else { + writerSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + public Builder setWriterSchema( + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder builderForValue) { + if (writerSchemaBuilder_ == null) { + writerSchema_ = builderForValue.build(); + } else { + writerSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ArrowSchema value) { + if (writerSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && writerSchema_ != null + && writerSchema_ + != com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance()) { + getWriterSchemaBuilder().mergeFrom(value); + } else { + writerSchema_ = value; + } + } else { + writerSchemaBuilder_.mergeFrom(value); + } + if (writerSchema_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + public Builder clearWriterSchema() { + bitField0_ = (bitField0_ & ~0x00000001); + writerSchema_ = null; + if (writerSchemaBuilder_ != null) { + writerSchemaBuilder_.dispose(); + writerSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder getWriterSchemaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getWriterSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getWriterSchemaOrBuilder() { + if (writerSchemaBuilder_ != null) { + return writerSchemaBuilder_.getMessageOrBuilder(); + } else { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance() + : writerSchema_; + } + } + + /** + * + * + *
+       * Optional. Arrow Schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowSchema writer_schema = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder> + getWriterSchemaFieldBuilder() { + if (writerSchemaBuilder_ == null) { + writerSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder>( + getWriterSchema(), getParentForChildren(), isClean()); + writerSchema_ = null; + } + return writerSchemaBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.ArrowRecordBatch rows_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder> + rowsBuilder_; + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + * + * @return Whether the rows field is set. + */ + public boolean hasRows() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + * + * @return The rows. + */ + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getRows() { + if (rowsBuilder_ == null) { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance() + : rows_; + } else { + return rowsBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + public Builder setRows(com.google.cloud.bigquery.storage.v1.ArrowRecordBatch value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + } else { + rowsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + public Builder setRows( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder builderForValue) { + if (rowsBuilder_ == null) { + rows_ = builderForValue.build(); + } else { + rowsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + public Builder mergeRows(com.google.cloud.bigquery.storage.v1.ArrowRecordBatch value) { + if (rowsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && rows_ != null + && rows_ + != com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance()) { + getRowsBuilder().mergeFrom(value); + } else { + rows_ = value; + } + } else { + rowsBuilder_.mergeFrom(value); + } + if (rows_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + public Builder clearRows() { + bitField0_ = (bitField0_ & ~0x00000002); + rows_ = null; + if (rowsBuilder_ != null) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder getRowsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder getRowsOrBuilder() { + if (rowsBuilder_ != null) { + return rowsBuilder_.getMessageOrBuilder(); + } else { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance() + : rows_; + } + } + + /** + * + * + *
+       * Required. Serialized row data in Arrow format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch rows = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder> + getRowsFieldBuilder() { + if (rowsBuilder_ == null) { + rowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder>( + getRows(), getParentForChildren(), isClean()); + rows_ = null; + } + return rowsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ProtoDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The protocol buffer schema used to serialize the data. Provide
+     * this value whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
+     * 
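+     *
+     * For example, the first request on a new connection might look like
+     * this hedged sketch, where streamName, schema, and rows are assumed
+     * to be defined by the caller:
+     *
+     *   AppendRowsRequest first =
+     *       AppendRowsRequest.newBuilder()
+     *           .setWriteStream(streamName) // assumed stream resource name
+     *           .setProtoRows(
+     *               ProtoData.newBuilder()
+     *                   .setWriterSchema(schema) // resent only when it changes
+     *                   .setRows(rows))
+     *           .build();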
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + boolean hasWriterSchema(); + + /** + * + * + *
+     * Optional. The protocol buffer schema used to serialize the data. Provide
+     * this value whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema(); + + /** + * + * + *
+     * Optional. The protocol buffer schema used to serialize the data. Provide
+     * this value whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchemaOrBuilder(); + + /** + * + * + *
+     * Required. Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
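+     *
+     * A hedged sketch of populating this field; row1 and row2 are assumed
+     * instances of a generated message class encoded with proto2
+     * default-value semantics:
+     *
+     *   ProtoRows rows =
+     *       ProtoRows.newBuilder()
+     *           .addSerializedRows(row1.toByteString()) // one bytes entry per row
+     *           .addSerializedRows(row2.toByteString())
+     *           .build();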
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + boolean hasRows(); + + /** + * + * + *
+     * Required. Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return The rows. + */ + com.google.cloud.bigquery.storage.v1.ProtoRows getRows(); + + /** + * + * + *
+     * Required. Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder(); + } + + /** + * + * + *
+   * ProtoData contains the data rows and schema when constructing append
+   * requests.
+   * 
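+   *
+   * A minimal construction sketch; RowMessage is a hypothetical generated
+   * message class whose descriptor matches the destination table, and
+   * rowMessage is an assumed instance of it:
+   *
+   *   ProtoData data =
+   *       ProtoData.newBuilder()
+   *           .setWriterSchema(
+   *               ProtoSchema.newBuilder()
+   *                   .setProtoDescriptor(RowMessage.getDescriptor().toProto()))
+   *           .setRows(
+   *               ProtoRows.newBuilder()
+   *                   .addSerializedRows(rowMessage.toByteString()))
+   *           .build();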
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData} + */ + public static final class ProtoData extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + ProtoDataOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ProtoData.newBuilder() to construct. + private ProtoData(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoData() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoData(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder.class); + } + + private int bitField0_; + public static final int WRITER_SCHEMA_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1.ProtoSchema writerSchema_; + + /** + * + * + *
+     * Optional. The protocol buffer schema used to serialize the data. Provide
+     * this value whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + @java.lang.Override + public boolean hasWriterSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. The protocol buffer schema used to serialize the data. Provide
+     * this value whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + + /** + * + * + *
+     * Optional. The protocol buffer schema used to serialize the data. Provide
+     * this value whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchemaOrBuilder() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + + public static final int ROWS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.ProtoRows rows_; + + /** + * + * + *
+     * Required. Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + @java.lang.Override + public boolean hasRows() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return The rows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows getRows() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } + + /** + * + * + *
+     * Required. Serialized row data in protobuf message format.
+     * Currently, the backend expects the serialized rows to adhere to
+     * proto2 semantics when appending rows, particularly with respect to
+     * how default values are encoded.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getWriterSchema()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getRows()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriterSchema()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRows()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData other = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) obj; + + if (hasWriterSchema() != other.hasWriterSchema()) return false; + if (hasWriterSchema()) { + if (!getWriterSchema().equals(other.getWriterSchema())) return false; + } + if (hasRows() != other.hasRows()) return false; + if (hasRows()) { + if (!getRows().equals(other.getRows())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriterSchema()) { + hash = (37 * hash) + WRITER_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getWriterSchema().hashCode(); + } + if (hasRows()) { + hash = (37 * hash) + ROWS_FIELD_NUMBER; + hash = (53 * hash) + getRows().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * ProtoData contains the data rows and schema when constructing append
+     * requests.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getWriterSchemaFieldBuilder(); + getRowsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writerSchema_ = null; + if (writerSchemaBuilder_ != null) { + writerSchemaBuilder_.dispose(); + writerSchemaBuilder_ = null; + } + rows_ = null; + if (rowsBuilder_ != null) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData build() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData result = + new com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writerSchema_ = + writerSchemaBuilder_ == null ? writerSchema_ : writerSchemaBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rows_ = rowsBuilder_ == null ? 
rows_ : rowsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData other) { + if (other + == com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance()) return this; + if (other.hasWriterSchema()) { + mergeWriterSchema(other.getWriterSchema()); + } + if (other.hasRows()) { + mergeRows(other.getRows()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getWriterSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getRowsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1.ProtoSchema writerSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoSchema, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder> + writerSchemaBuilder_; + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + public boolean hasWriterSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { + if (writerSchemaBuilder_ == null) { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } else { + return writerSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writerSchema_ = value; + } else { + writerSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema( + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder builderForValue) { + if (writerSchemaBuilder_ == null) { + writerSchema_ = builderForValue.build(); + } else { + writerSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && writerSchema_ != null + && writerSchema_ + != com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance()) { + getWriterSchemaBuilder().mergeFrom(value); + } else { + writerSchema_ = value; + } + } else { + writerSchemaBuilder_.mergeFrom(value); + } + if (writerSchema_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public Builder clearWriterSchema() { + bitField0_ = (bitField0_ & ~0x00000001); + writerSchema_ = null; + if (writerSchemaBuilder_ != null) { + writerSchemaBuilder_.dispose(); + writerSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder getWriterSchemaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getWriterSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchemaOrBuilder() { + if (writerSchemaBuilder_ != null) { + return writerSchemaBuilder_.getMessageOrBuilder(); + } else { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + } + + /** + * + * + *
+       * Optional. The protocol buffer schema used to serialize the data. Provide
+       * this value whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoSchema, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder> + getWriterSchemaFieldBuilder() { + if (writerSchemaBuilder_ == null) { + writerSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoSchema, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder>( + getWriterSchema(), getParentForChildren(), isClean()); + writerSchema_ = null; + } + return writerSchemaBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.ProtoRows rows_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoRows, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder> + rowsBuilder_; + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + public boolean hasRows() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + * + * @return The rows. + */ + public com.google.cloud.bigquery.storage.v1.ProtoRows getRows() { + if (rowsBuilder_ == null) { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } else { + return rowsBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder setRows(com.google.cloud.bigquery.storage.v1.ProtoRows value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + } else { + rowsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder setRows( + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder builderForValue) { + if (rowsBuilder_ == null) { + rows_ = builderForValue.build(); + } else { + rowsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder mergeRows(com.google.cloud.bigquery.storage.v1.ProtoRows value) { + if (rowsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && rows_ != null + && rows_ != com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance()) { + getRowsBuilder().mergeFrom(value); + } else { + rows_ = value; + } + } else { + rowsBuilder_.mergeFrom(value); + } + if (rows_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public Builder clearRows() { + bitField0_ = (bitField0_ & ~0x00000002); + rows_ = null; + if (rowsBuilder_ != null) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1.ProtoRows.Builder getRowsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder() { + if (rowsBuilder_ != null) { + return rowsBuilder_.getMessageOrBuilder(); + } else { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance() + : rows_; + } + } + + /** + * + * + *
+       * Required. Serialized row data in protobuf message format.
+       * Currently, the backend expects the serialized rows to adhere to
+       * proto2 semantics when appending rows, particularly with respect to
+       * how default values are encoded.
+       * 
+ * + * .google.cloud.bigquery.storage.v1.ProtoRows rows = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoRows, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder> + getRowsFieldBuilder() { + if (rowsBuilder_ == null) { + rowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ProtoRows, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder>( + getRows(), getParentForChildren(), isClean()); + rows_ = null; + } + return rowsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int rowsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rows_; + + public enum RowsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PROTO_ROWS(4), + ARROW_ROWS(5), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 4: + return PROTO_ROWS; + case 5: + return ARROW_ROWS; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object writeStream_ = ""; + + /** + * + * + *
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
+   *
+   * For explicitly created write streams, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   *
+   * For the special default stream, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
+   * 
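+   *
+   * A minimal illustrative sketch of r1 and r2 above (not part of the
+   * generated API docs), assuming protoData is a prepared ProtoData payload
+   * and the project, dataset, and table names are placeholders:
+   *
+   *   AppendRowsRequest r1 =
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(
+   *               "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default")
+   *           .setProtoRows(protoData)
+   *           .build();
+   *   // Same destination as r1, so write_stream may be omitted.
+   *   AppendRowsRequest r2 =
+   *       AppendRowsRequest.newBuilder().setProtoRows(protoData).build();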
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
+   *
+   * For explicitly created write streams, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   *
+   * For the special default stream, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
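+   *
+   * A minimal illustrative sketch (not part of the generated API docs),
+   * assuming streamName and protoData are placeholders; the append succeeds
+   * only if the stream's next offset is 42:
+   *
+   *   AppendRowsRequest req =
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(streamName)
+   *           .setOffset(com.google.protobuf.Int64Value.of(42))
+   *           .setProtoRows(protoData)
+   *           .build();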
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + public static final int PROTO_ROWS_FIELD_NUMBER = 4; + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + @java.lang.Override + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProtoRows() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.getDefaultInstance(); + } + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.getDefaultInstance(); + } + + public static final int ARROW_ROWS_FIELD_NUMBER = 5; + + /** + * + * + *
+   * Rows in Arrow format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + * + * @return Whether the arrowRows field is set. + */ + @java.lang.Override + public boolean hasArrowRows() { + return rowsCase_ == 5; + } + + /** + * + * + *
+   * Rows in Arrow format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + * + * @return The arrowRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData getArrowRows() { + if (rowsCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.getDefaultInstance(); + } + + /** + * + * + *
+   * Rows in Arrow format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder + getArrowRowsOrBuilder() { + if (rowsCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.getDefaultInstance(); + } + + public static final int TRACE_ID_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting in the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting in the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MISSING_VALUE_INTERPRETATIONS_FIELD_NUMBER = 7; + + private static final class MissingValueInterpretationsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_MissingValueInterpretationsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.ENUM, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .MISSING_VALUE_INTERPRETATION_UNSPECIFIED + .getNumber()); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField + missingValueInterpretations_; + + private com.google.protobuf.MapField + internalGetMissingValueInterpretations() { + if (missingValueInterpretations_ == null) { + return com.google.protobuf.MapField.emptyMapField( + MissingValueInterpretationsDefaultEntryHolder.defaultEntry); + } + return missingValueInterpretations_; + } + + private static final com.google.protobuf.Internal.MapAdapter.Converter< + java.lang.Integer, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + missingValueInterpretationsValueConverter = + com.google.protobuf.Internal.MapAdapter.newEnumConverter( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .internalGetValueMap(), + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .UNRECOGNIZED); + + private static final java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + internalGetAdaptedMissingValueInterpretationsMap( + java.util.Map map) { + return new com.google.protobuf.Internal.MapAdapter< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation, + java.lang.Integer>(map, missingValueInterpretationsValueConverter); + } + + public int getMissingValueInterpretationsCount() { + return internalGetMissingValueInterpretations().getMap().size(); + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
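+   *
+   * A minimal illustrative sketch of the {'foo': NULL_VALUE, 'bar':
+   * DEFAULT_VALUE} example above (not part of the generated API docs),
+   * assuming protoData is a placeholder and using the nested
+   * MissingValueInterpretation enum:
+   *
+   *   AppendRowsRequest req =
+   *       AppendRowsRequest.newBuilder()
+   *           .putMissingValueInterpretations(
+   *               "foo", MissingValueInterpretation.NULL_VALUE)
+   *           .putMissingValueInterpretations(
+   *               "bar", MissingValueInterpretation.DEFAULT_VALUE)
+   *           .setProtoRows(protoData)
+   *           .build();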
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public boolean containsMissingValueInterpretations(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetMissingValueInterpretations().getMap().containsKey(key); + } + + /** Use {@link #getMissingValueInterpretationsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMissingValueInterpretations() { + return getMissingValueInterpretationsMap(); + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMissingValueInterpretationsMap() { + return internalGetAdaptedMissingValueInterpretationsMap( + internalGetMissingValueInterpretations().getMap()); + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public /* nullable */ com.google.cloud.bigquery.storage.v1.AppendRowsRequest + .MissingValueInterpretation + getMissingValueInterpretationsOrDefault( + java.lang.String key, + /* nullable */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + return map.containsKey(key) + ? missingValueInterpretationsValueConverter.doForward(map.get(key)) + : defaultValue; + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getMissingValueInterpretationsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return missingValueInterpretationsValueConverter.doForward(map.get(key)); + } + + /** Use {@link #getMissingValueInterpretationsValueMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getMissingValueInterpretationsValue() { + return getMissingValueInterpretationsValueMap(); + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public java.util.Map + getMissingValueInterpretationsValueMap() { + return internalGetMissingValueInterpretations().getMap(); + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public int getMissingValueInterpretationsValueOrDefault(java.lang.String key, int defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields. Missing
+   * values are fields present in the user schema but missing in rows. The key
+   * is the field name. The value is the interpretation of missing values for
+   * the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, a field name can only be a top-level column name; it can't be a
+   * struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public int getMissingValueInterpretationsValueOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int DEFAULT_MISSING_VALUE_INTERPRETATION_FIELD_NUMBER = 8; + private int defaultMissingValueInterpretation_ = 0; + + /** + * + * + *
+   * Optional. Default missing value interpretation for all columns in the
+   * table. When a value is specified on an `AppendRowsRequest`, it is applied
+   * to all requests from that point forward, until a subsequent
+   * `AppendRowsRequest` sets it to a different value.
+   * `missing_value_interpretations` can override
+   * `default_missing_value_interpretation`. For example, if you want to write
+   * `NULL` instead of using default values for some columns, you can set
+   * `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same
+   * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+   * 
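+   *
+   * A minimal illustrative sketch of that combination (not part of the
+   * generated API docs), assuming protoData is a placeholder: default all
+   * columns to DEFAULT_VALUE, but write NULL for missing values in column foo:
+   *
+   *   AppendRowsRequest req =
+   *       AppendRowsRequest.newBuilder()
+   *           .setDefaultMissingValueInterpretation(
+   *               MissingValueInterpretation.DEFAULT_VALUE)
+   *           .putMissingValueInterpretations(
+   *               "foo", MissingValueInterpretation.NULL_VALUE)
+   *           .setProtoRows(protoData)
+   *           .build();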
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for defaultMissingValueInterpretation. + */ + @java.lang.Override + public int getDefaultMissingValueInterpretationValue() { + return defaultMissingValueInterpretation_; + } + + /** + * + * + *
+   * Optional. Default missing value interpretation for all columns in the
+   * table. When a value is specified on an `AppendRowsRequest`, it is applied
+   * to all requests from that point forward, until a subsequent
+   * `AppendRowsRequest` sets it to a different value.
+   * `missing_value_interpretations` can override
+   * `default_missing_value_interpretation`. For example, if you want to write
+   * `NULL` instead of using default values for some columns, you can set
+   * `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same
+   * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The defaultMissingValueInterpretation. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getDefaultMissingValueInterpretation() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation result = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation.forNumber( + defaultMissingValueInterpretation_); + return result == null + ? com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoRows()) { + if (!getProtoRows().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getOffset()); + } + if (rowsCase_ == 4) { + output.writeMessage( + 4, (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_); + } + if (rowsCase_ == 5) { + output.writeMessage( + 5, (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, traceId_); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, + internalGetMissingValueInterpretations(), + MissingValueInterpretationsDefaultEntryHolder.defaultEntry, + 7); + if (defaultMissingValueInterpretation_ + != com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .MISSING_VALUE_INTERPRETATION_UNSPECIFIED + .getNumber()) { + output.writeEnum(8, defaultMissingValueInterpretation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_); + } + if (rowsCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, traceId_); + } + for (java.util.Map.Entry entry : + internalGetMissingValueInterpretations().getMap().entrySet()) { + com.google.protobuf.MapEntry + missingValueInterpretations__ = + 
MissingValueInterpretationsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, missingValueInterpretations__); + } + if (defaultMissingValueInterpretation_ + != com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .MISSING_VALUE_INTERPRETATION_UNSPECIFIED + .getNumber()) { + size += + com.google.protobuf.CodedOutputStream.computeEnumSize( + 8, defaultMissingValueInterpretation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsRequest other = + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getTraceId().equals(other.getTraceId())) return false; + if (!internalGetMissingValueInterpretations() + .equals(other.internalGetMissingValueInterpretations())) return false; + if (defaultMissingValueInterpretation_ != other.defaultMissingValueInterpretation_) + return false; + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 4: + if (!getProtoRows().equals(other.getProtoRows())) return false; + break; + case 5: + if (!getArrowRows().equals(other.getArrowRows())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + if (!internalGetMissingValueInterpretations().getMap().isEmpty()) { + hash = (37 * hash) + MISSING_VALUE_INTERPRETATIONS_FIELD_NUMBER; + hash = (53 * hash) + internalGetMissingValueInterpretations().hashCode(); + } + hash = (37 * hash) + DEFAULT_MISSING_VALUE_INTERPRETATION_FIELD_NUMBER; + hash = (53 * hash) + defaultMissingValueInterpretation_; + switch (rowsCase_) { + case 4: + hash = (37 * hash) + PROTO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getProtoRows().hashCode(); + break; + case 5: + hash = (37 * hash) + ARROW_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getArrowRows().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `AppendRows`.
+   *
+   * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+   * AppendRowsRequest need only be specified for the first request before
+   * switching table destinations. You can also switch table destinations within
+   * the same connection for the default stream.
+   *
+   * A single AppendRowsRequest must be less than 10 MB in size.
+   * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
+   * 
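+   *
+   * A minimal illustrative sketch of a complete first request on a connection
+   * (not part of the generated API docs), assuming streamName, schema, and
+   * rows are placeholders prepared as in the field-level examples:
+   *
+   *   AppendRowsRequest first =
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(streamName)
+   *           .setProtoRows(
+   *               ProtoData.newBuilder().setWriterSchema(schema).setRows(rows))
+   *           .setTraceId("my-writer")
+   *           .build();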
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsRequest) + com.google.cloud.bigquery.storage.v1.AppendRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 7: + return internalGetMissingValueInterpretations(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 7: + return internalGetMutableMissingValueInterpretations(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AppendRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOffsetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writeStream_ = ""; + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + if (protoRowsBuilder_ != null) { + protoRowsBuilder_.clear(); + } + if (arrowRowsBuilder_ != null) { + arrowRowsBuilder_.clear(); + } + traceId_ = ""; + internalGetMutableMissingValueInterpretations().clear(); + defaultMissingValueInterpretation_ = 0; + rowsCase_ = 0; + rows_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest build() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest buildPartial() { + 
com.google.cloud.bigquery.storage.v1.AppendRowsRequest result = + new com.google.cloud.bigquery.storage.v1.AppendRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.AppendRowsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writeStream_ = writeStream_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offsetBuilder_ == null ? offset_ : offsetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.traceId_ = traceId_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.missingValueInterpretations_ = internalGetMissingValueInterpretations(); + result.missingValueInterpretations_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.defaultMissingValueInterpretation_ = defaultMissingValueInterpretation_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.cloud.bigquery.storage.v1.AppendRowsRequest result) { + result.rowsCase_ = rowsCase_; + result.rows_ = this.rows_; + if (rowsCase_ == 4 && protoRowsBuilder_ != null) { + result.rows_ = protoRowsBuilder_.build(); + } + if (rowsCase_ == 5 && arrowRowsBuilder_ != null) { + result.rows_ = arrowRowsBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AppendRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.AppendRowsRequest.getDefaultInstance()) + return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000010; + onChanged(); + } + internalGetMutableMissingValueInterpretations() + .mergeFrom(other.internalGetMissingValueInterpretations()); + bitField0_ |= 0x00000020; + if (other.defaultMissingValueInterpretation_ != 0) { + setDefaultMissingValueInterpretationValue( + other.getDefaultMissingValueInterpretationValue()); + } + switch 
(other.getRowsCase()) { + case PROTO_ROWS: + { + mergeProtoRows(other.getProtoRows()); + break; + } + case ARROW_ROWS: + { + mergeArrowRows(other.getArrowRows()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoRows()) { + if (!getProtoRows().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + writeStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getOffsetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 34: + { + input.readMessage(getProtoRowsFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage(getArrowRowsFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 5; + break; + } // case 42 + case 50: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 50 + case 58: + { + com.google.protobuf.MapEntry + missingValueInterpretations__ = + input.readMessage( + MissingValueInterpretationsDefaultEntryHolder.defaultEntry + .getParserForType(), + extensionRegistry); + internalGetMutableMissingValueInterpretations() + .getMutableMap() + .put( + missingValueInterpretations__.getKey(), + missingValueInterpretations__.getValue()); + bitField0_ |= 0x00000020; + break; + } // case 58 + case 64: + { + defaultMissingValueInterpretation_ = input.readEnum(); + bitField0_ |= 0x00000040; + break; + } // case 64 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object writeStream_ = ""; + + /** + * + * + *
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
+     *
+     * For explicitly created write streams, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     *
+     * For the special default stream, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
+     *
+     * For explicitly created write streams, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     *
+     * For the special default stream, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
+     *
+     * For explicitly created write streams, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     *
+     * For the special default stream, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
+     *
+     * For explicitly created write streams, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     *
+     * For the special default stream, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + writeStream_ = getDefaultInstance().getWriteStream(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
+     *
+     * For explicitly created write streams, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+     *
+     * For the special default stream, the format is:
+     *
+     * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + } else { + offsetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
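+     *
+     * A minimal sketch of pinning an append to an expected offset (the value
+     * 42 is purely illustrative):
+     *
+     *   AppendRowsRequest request =
+     *       AppendRowsRequest.newBuilder()
+     *           .setOffset(com.google.protobuf.Int64Value.of(42L))  // expected next offset
+     *           .build();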
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && offset_ != null + && offset_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getOffsetBuilder().mergeFrom(value); + } else { + offset_ = value; + } + } else { + offsetBuilder_.mergeFrom(value); + } + if (offset_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the
+     * same as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder> + protoRowsBuilder_; + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + @java.lang.Override + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return protoRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Rows in proto format.
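+     *
+     * A sketch of attaching proto rows, assuming a ProtoSchema (writerSchema)
+     * and a ByteString of serialized row data (rowBytes) built elsewhere:
+     *
+     *   AppendRowsRequest.ProtoData protoData =
+     *       AppendRowsRequest.ProtoData.newBuilder()
+     *           .setWriterSchema(writerSchema)  // assumed pre-built schema
+     *           .setRows(ProtoRows.newBuilder().addSerializedRows(rowBytes))
+     *           .build();
+     *   AppendRowsRequest request =
+     *       AppendRowsRequest.newBuilder().setProtoRows(protoData).build();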
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + protoRowsBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder builderForValue) { + if (protoRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + protoRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder mergeProtoRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.newBuilder( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + protoRowsBuilder_.mergeFrom(value); + } else { + protoRowsBuilder_.setMessage(value); + } + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public Builder clearProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + protoRowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder + getProtoRowsBuilder() { + return getProtoRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if ((rowsCase_ == 4) && (protoRowsBuilder_ != null)) { + return protoRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder> + getProtoRowsFieldBuilder() { + if (protoRowsBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.getDefaultInstance(); + } + protoRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + return protoRowsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder> + arrowRowsBuilder_; + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + * + * @return Whether the arrowRows field is set. + */ + @java.lang.Override + public boolean hasArrowRows() { + return rowsCase_ == 5; + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + * + * @return The arrowRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData getArrowRows() { + if (arrowRowsBuilder_ == null) { + if (rowsCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + .getDefaultInstance(); + } else { + if (rowsCase_ == 5) { + return arrowRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Rows in Arrow format.
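+     *
+     * A sketch of attaching Arrow rows, assuming an ArrowSchema (arrowSchema)
+     * and an ArrowRecordBatch (recordBatch) built elsewhere:
+     *
+     *   AppendRowsRequest.ArrowData arrowData =
+     *       AppendRowsRequest.ArrowData.newBuilder()
+     *           .setWriterSchema(arrowSchema)  // assumed pre-built schema
+     *           .setRows(recordBatch)          // assumed pre-built batch
+     *           .build();
+     *   AppendRowsRequest request =
+     *       AppendRowsRequest.newBuilder().setArrowRows(arrowData).build();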
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + public Builder setArrowRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData value) { + if (arrowRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + arrowRowsBuilder_.setMessage(value); + } + rowsCase_ = 5; + return this; + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + public Builder setArrowRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder builderForValue) { + if (arrowRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + arrowRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 5; + return this; + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + public Builder mergeArrowRows( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData value) { + if (arrowRowsBuilder_ == null) { + if (rowsCase_ == 5 + && rows_ + != com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.newBuilder( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 5) { + arrowRowsBuilder_.mergeFrom(value); + } else { + arrowRowsBuilder_.setMessage(value); + } + } + rowsCase_ = 5; + return this; + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + public Builder clearArrowRows() { + if (arrowRowsBuilder_ == null) { + if (rowsCase_ == 5) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 5) { + rowsCase_ = 0; + rows_ = null; + } + arrowRowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder + getArrowRowsBuilder() { + return getArrowRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder + getArrowRowsOrBuilder() { + if ((rowsCase_ == 5) && (arrowRowsBuilder_ != null)) { + return arrowRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Rows in Arrow format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder> + getArrowRowsFieldBuilder() { + if (arrowRowsBuilder_ == null) { + if (!(rowsCase_ == 5)) { + rows_ = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.getDefaultInstance(); + } + arrowRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 5; + onChanged(); + return arrowRowsBuilder_; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the value set on the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the value set on the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the value set on the
+     * initial request is respected.
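+     *
+     * For example (the identifier is purely illustrative):
+     *
+     *   AppendRowsRequest request =
+     *       AppendRowsRequest.newBuilder().setTraceId("my-writer:1.0").build();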
+     * 
+ * + * string trace_id = 6; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the value set on the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the value set on the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.protobuf.MapField + missingValueInterpretations_; + + private com.google.protobuf.MapField + internalGetMissingValueInterpretations() { + if (missingValueInterpretations_ == null) { + return com.google.protobuf.MapField.emptyMapField( + MissingValueInterpretationsDefaultEntryHolder.defaultEntry); + } + return missingValueInterpretations_; + } + + private com.google.protobuf.MapField + internalGetMutableMissingValueInterpretations() { + if (missingValueInterpretations_ == null) { + missingValueInterpretations_ = + com.google.protobuf.MapField.newMapField( + MissingValueInterpretationsDefaultEntryHolder.defaultEntry); + } + if (!missingValueInterpretations_.isMutable()) { + missingValueInterpretations_ = missingValueInterpretations_.copy(); + } + bitField0_ |= 0x00000020; + onChanged(); + return missingValueInterpretations_; + } + + public int getMissingValueInterpretationsCount() { + return internalGetMissingValueInterpretations().getMap().size(); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public boolean containsMissingValueInterpretations(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetMissingValueInterpretations().getMap().containsKey(key); + } + + /** Use {@link #getMissingValueInterpretationsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMissingValueInterpretations() { + return getMissingValueInterpretationsMap(); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMissingValueInterpretationsMap() { + return internalGetAdaptedMissingValueInterpretationsMap( + internalGetMissingValueInterpretations().getMap()); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public /* nullable */ com.google.cloud.bigquery.storage.v1.AppendRowsRequest + .MissingValueInterpretation + getMissingValueInterpretationsOrDefault( + java.lang.String key, + /* nullable */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + return map.containsKey(key) + ? missingValueInterpretationsValueConverter.doForward(map.get(key)) + : defaultValue; + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getMissingValueInterpretationsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return missingValueInterpretationsValueConverter.doForward(map.get(key)); + } + + /** Use {@link #getMissingValueInterpretationsValueMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map + getMissingValueInterpretationsValue() { + return getMissingValueInterpretationsValueMap(); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public java.util.Map + getMissingValueInterpretationsValueMap() { + return internalGetMissingValueInterpretations().getMap(); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public int getMissingValueInterpretationsValueOrDefault( + java.lang.String key, int defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + @java.lang.Override + public int getMissingValueInterpretationsValueOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMissingValueInterpretations().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearMissingValueInterpretations() { + bitField0_ = (bitField0_ & ~0x00000020); + internalGetMutableMissingValueInterpretations().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + public Builder removeMissingValueInterpretations(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableMissingValueInterpretations().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMutableMissingValueInterpretations() { + bitField0_ |= 0x00000020; + return internalGetAdaptedMissingValueInterpretationsMap( + internalGetMutableMissingValueInterpretations().getMutableMap()); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
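+     *
+     * A sketch of marking a hypothetical top-level column 'foo' so that its
+     * missing values are written as NULL:
+     *
+     *   AppendRowsRequest request =
+     *       AppendRowsRequest.newBuilder()
+     *           .putMissingValueInterpretations(
+     *               "foo",  // hypothetical column name
+     *               AppendRowsRequest.MissingValueInterpretation.NULL_VALUE)
+     *           .build();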
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + public Builder putMissingValueInterpretations( + java.lang.String key, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation value) { + if (key == null) { + throw new NullPointerException("map key"); + } + + internalGetMutableMissingValueInterpretations() + .getMutableMap() + .put(key, missingValueInterpretationsValueConverter.doBackward(value)); + bitField0_ |= 0x00000020; + return this; + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + public Builder putAllMissingValueInterpretations( + java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + values) { + internalGetAdaptedMissingValueInterpretationsMap( + internalGetMutableMissingValueInterpretations().getMutableMap()) + .putAll(values); + bitField0_ |= 0x00000020; + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map + getMutableMissingValueInterpretationsValue() { + bitField0_ |= 0x00000020; + return internalGetMutableMissingValueInterpretations().getMutableMap(); + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + public Builder putMissingValueInterpretationsValue(java.lang.String key, int value) { + if (key == null) { + throw new NullPointerException("map key"); + } + + internalGetMutableMissingValueInterpretations().getMutableMap().put(key, value); + bitField0_ |= 0x00000020; + return this; + } + + /** + * + * + *
+     * A map to indicate how to interpret missing values for some fields.
+     * Missing values are fields present in the user schema but missing in
+     * rows. The key is the field name. The value is the interpretation of
+     * missing values for the field.
+     *
+     * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+     * missing values in field foo are interpreted as NULL, and all missing
+     * values in field bar are interpreted as the default value of field bar
+     * in the table schema.
+     *
+     * If a field is not in this map and has missing values, the missing
+     * values in this field are interpreted as NULL.
+     *
+     * This field only applies to the current request; it won't affect other
+     * requests on the connection.
+     *
+     * Currently, a field name can only be a top-level column name; it can't
+     * be a struct field path like 'foo.bar'.
+     * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + public Builder putAllMissingValueInterpretationsValue( + java.util.Map values) { + internalGetMutableMissingValueInterpretations().getMutableMap().putAll(values); + bitField0_ |= 0x00000020; + return this; + } + + private int defaultMissingValueInterpretation_ = 0; + + /** + * + * + *
+     * Optional. Default missing value interpretation for all columns in the
+     * table. When a value is specified on an `AppendRowsRequest`, it is applied
+     * to all requests from that point forward, until a subsequent
+     * `AppendRowsRequest` sets it to a different value.
+     * `missing_value_interpretations` can override
+     * `default_missing_value_interpretation`. For example, if you want to write
+     * `NULL` instead of using default values for some columns, you can set
+     * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+     * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for defaultMissingValueInterpretation. + */ + @java.lang.Override + public int getDefaultMissingValueInterpretationValue() { + return defaultMissingValueInterpretation_; + } + + /** + * + * + *
+     * Optional. Default missing value interpretation for all columns in the
+     * table. When a value is specified on an `AppendRowsRequest`, it is applied
+     * to all requests from that point forward, until a subsequent
+     * `AppendRowsRequest` sets it to a different value.
+     * `missing_value_interpretations` can override
+     * `default_missing_value_interpretation`. For example, if you want to write
+     * `NULL` instead of using default values for some columns, you can set
+     * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+     * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for defaultMissingValueInterpretation to set. + * @return This builder for chaining. + */ + public Builder setDefaultMissingValueInterpretationValue(int value) { + defaultMissingValueInterpretation_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Default missing value interpretation for all columns in the
+     * table. When a value is specified on an `AppendRowsRequest`, it is applied
+     * to all requests from that point forward, until a subsequent
+     * `AppendRowsRequest` sets it to a different value.
+     * `missing_value_interpretations` can override
+     * `default_missing_value_interpretation`. For example, if you want to write
+     * `NULL` instead of using default values for some columns, you can set
+     * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+     * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The defaultMissingValueInterpretation. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getDefaultMissingValueInterpretation() { + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation result = + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .forNumber(defaultMissingValueInterpretation_); + return result == null + ? com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + .UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Optional. Default missing value interpretation for all columns in the
+     * table. When a value is specified on an `AppendRowsRequest`, it is applied
+     * to all requests from that point forward, until a subsequent
+     * `AppendRowsRequest` sets it to a different value.
+     * `missing_value_interpretations` can override
+     * `default_missing_value_interpretation`. For example, if you want to write
+     * `NULL` instead of using default values for some columns, you can set
+     * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+     * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
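+     *
+     * A sketch of that combination (the column name 'foo' is hypothetical):
+     *
+     *   AppendRowsRequest request =
+     *       AppendRowsRequest.newBuilder()
+     *           .setDefaultMissingValueInterpretation(
+     *               AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+     *           .putMissingValueInterpretations(
+     *               "foo",  // hypothetical column: write NULL instead of default
+     *               AppendRowsRequest.MissingValueInterpretation.NULL_VALUE)
+     *           .build();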
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The defaultMissingValueInterpretation to set. + * @return This builder for chaining. + */ + public Builder setDefaultMissingValueInterpretation( + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + defaultMissingValueInterpretation_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Default missing value interpretation for all columns in the
+     * table. When a value is specified on an `AppendRowsRequest`, it is applied
+     * to all requests from that point forward, until a subsequent
+     * `AppendRowsRequest` sets it to a different value.
+     * `missing_value_interpretations` can override
+     * `default_missing_value_interpretation`. For example, if you want to write
+     * `NULL` instead of using default values for some columns, you can set
+     * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+     * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearDefaultMissingValueInterpretation() { + bitField0_ = (bitField0_ & ~0x00000040); + defaultMissingValueInterpretation_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsRequest) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java new file mode 100644 index 000000000000..3c8ace9f6530 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java @@ -0,0 +1,576 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface AppendRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
+   *
+   * For explicitly created write streams, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   *
+   * For the special default stream, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + + /** + * + * + *
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
+   *
+   * For explicitly created write streams, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+   *
+   * For the special default stream, the format is:
+   *
+   * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the
+   * same as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + boolean hasProtoRows(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProtoRows(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData proto_rows = 4; + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoDataOrBuilder getProtoRowsOrBuilder(); + + /** + * + * + *
+   * Rows in arrow format.
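+   *
+   * arrow_rows and proto_rows are alternatives in the rows oneof, so at most
+   * one of them is set on any request. An illustrative reader-side check
+   * (request is a received AppendRowsRequest):
+   *
+   *   if (request.hasArrowRows()) {
+   *     // Arrow-encoded payload.
+   *   } else if (request.hasProtoRows()) {
+   *     // Proto-encoded payload.
+   *   }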
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + * + * @return Whether the arrowRows field is set. + */ + boolean hasArrowRows(); + + /** + * + * + *
+   * Rows in arrow format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + * + * @return The arrowRows. + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData getArrowRows(); + + /** + * + * + *
+   * Rows in arrow format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5; + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowDataOrBuilder getArrowRowsOrBuilder(); + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting in the
+   * initial request is respected.
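+   *
+   * Because only the first setting is respected, it suffices to set this on
+   * the initial request of a connection. An illustrative sketch (streamName
+   * and rows are hypothetical; builder setters assumed to follow standard
+   * protoc naming):
+   *
+   *   AppendRowsRequest first = AppendRowsRequest.newBuilder()
+   *       .setWriteStream(streamName)
+   *       .setTraceId("my-writer:1.0") // illustrative value
+   *       .setProtoRows(rows)
+   *       .build();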
+   * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting in the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
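+   *
+   * An illustrative sketch of the example above (the put accessor is assumed
+   * to follow standard protoc map-field naming; rows is hypothetical):
+   *
+   *   AppendRowsRequest req = AppendRowsRequest.newBuilder()
+   *       .putMissingValueInterpretations(
+   *           "foo", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE)
+   *       .putMissingValueInterpretations(
+   *           "bar", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+   *       .setProtoRows(rows)
+   *       .build();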
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + int getMissingValueInterpretationsCount(); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + boolean containsMissingValueInterpretations(java.lang.String key); + + /** Use {@link #getMissingValueInterpretationsMap()} instead. */ + @java.lang.Deprecated + java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMissingValueInterpretations(); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + java.util.Map< + java.lang.String, + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> + getMissingValueInterpretationsMap(); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + /* nullable */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getMissingValueInterpretationsOrDefault( + java.lang.String key, + /* nullable */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + defaultValue); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getMissingValueInterpretationsOrThrow(java.lang.String key); + + /** Use {@link #getMissingValueInterpretationsValueMap()} instead. */ + @java.lang.Deprecated + java.util.Map getMissingValueInterpretationsValue(); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + java.util.Map getMissingValueInterpretationsValueMap(); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + int getMissingValueInterpretationsValueOrDefault(java.lang.String key, int defaultValue); + + /** + * + * + *
+   * A map to indicate how to interpret missing values for some fields.
+   * Missing values are fields present in the user schema but missing in rows.
+   * The key is the field name. The value is the interpretation of missing
+   * values for the field.
+   *
+   * For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+   * missing values in field foo are interpreted as NULL, and all missing
+   * values in field bar are interpreted as the default value of field bar in
+   * the table schema.
+   *
+   * If a field is not in this map and has missing values, the missing values
+   * in this field are interpreted as NULL.
+   *
+   * This field only applies to the current request; it won't affect other
+   * requests on the connection.
+   *
+   * Currently, the field name can only be a top-level column name; it can't
+   * be a struct field path like 'foo.bar'.
+   * 
+ * + * + * map<string, .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation> missing_value_interpretations = 7; + * + */ + int getMissingValueInterpretationsValueOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. Default missing value interpretation for all columns in the
+   * table. When a value is specified on an `AppendRowsRequest`, it is applied
+   * to all requests from that point forward, until a subsequent
+   * `AppendRowsRequest` sets it to a different value.
+   * `missing_value_interpretations` can override
+   * `default_missing_value_interpretation`. For example, if you want to write
+   * `NULL` instead of using default values for some columns, you can set
+   * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+   * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
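+   *
+   * An illustrative sketch of the example above (setters assumed to follow
+   * standard protoc naming; col_to_null is a hypothetical column name):
+   *
+   *   AppendRowsRequest req = AppendRowsRequest.newBuilder()
+   *       .setDefaultMissingValueInterpretation(
+   *           AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+   *       .putMissingValueInterpretations(
+   *           "col_to_null",
+   *           AppendRowsRequest.MissingValueInterpretation.NULL_VALUE)
+   *       .build();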
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for defaultMissingValueInterpretation. + */ + int getDefaultMissingValueInterpretationValue(); + + /** + * + * + *
+   * Optional. Default missing value interpretation for all columns in the
+   * table. When a value is specified on an `AppendRowsRequest`, it is applied
+   * to all requests from that point forward, until a subsequent
+   * `AppendRowsRequest` sets it to a different value.
+   * `missing_value_interpretations` can override
+   * `default_missing_value_interpretation`. For example, if you want to write
+   * `NULL` instead of using default values for some columns, you can set
+   * `default_missing_value_interpretation` to `DEFAULT_VALUE` and, at the same
+   * time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation default_missing_value_interpretation = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The defaultMissingValueInterpretation. + */ + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation + getDefaultMissingValueInterpretation(); + + com.google.cloud.bigquery.storage.v1.AppendRowsRequest.RowsCase getRowsCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java new file mode 100644 index 000000000000..1e450082c37c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java @@ -0,0 +1,3252 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `AppendRows`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse} + */ +public final class AppendRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse) + AppendRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AppendRowsResponse.newBuilder() to construct. + private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsResponse() { + rowErrors_ = java.util.Collections.emptyList(); + writeStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.Builder.class); + } + + public interface AppendResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set when appending to default streams.
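+     *
+     * An illustrative reader-side sketch (result is a received AppendResult):
+     *
+     *   if (result.hasOffset()) {
+     *     long lastOffset = result.getOffset().getValue();
+     *   }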
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set when appending to default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set when appending to default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + } + + /** + * + * + *
+   * AppendResult is returned for successful append requests.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult} + */ + public static final class AppendResult extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + AppendResultOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AppendResult.newBuilder() to construct. + private AppendResult(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendResult() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendResult(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder.class); + } + + private int bitField0_; + public static final int OFFSET_FIELD_NUMBER = 1; + private com.google.protobuf.Int64Value offset_; + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set when appending to default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set when appending to default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set when appending to default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getOffset()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getOffset()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult other = + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) obj; + + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + 
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * AppendResult is returned for successful append requests.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOffsetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult build() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult result = + new com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.offset_ = offsetBuilder_ == null ? 
offset_ : offsetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult other) { + if (other + == com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance()) return this; + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getOffsetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + } else { + offsetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && offset_ != null + && offset_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getOffsetBuilder().mergeFrom(value); + } else { + offset_ = value; + } + } else { + offsetBuilder_.mergeFrom(value); + } + if (offset_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000001); + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set when appending to default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int responseCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object response_; + + public enum ResponseCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + APPEND_RESULT(1), + ERROR(2), + RESPONSE_NOT_SET(0); + private final int value; + + private ResponseCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static ResponseCase valueOf(int value) { + return forNumber(value); + } + + public static ResponseCase forNumber(int value) { + switch (value) { + case 1: + return APPEND_RESULT; + case 2: + return ERROR; + case 0: + return RESPONSE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public static final int APPEND_RESULT_FIELD_NUMBER = 1; + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + @java.lang.Override + public boolean hasAppendResult() { + return responseCase_ == 1; + } + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult getAppendResult() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + public static final int ERROR_FIELD_NUMBER = 2; + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures. The
+   * request can be retried if the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side errors that can be retried.
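+   *
+   * An illustrative handling sketch (response is a received
+   * AppendRowsResponse; the retry decision is a suggestion, not a rule):
+   *
+   *   if (response.hasError()) {
+   *     com.google.rpc.Status status = response.getError();
+   *     // Map status.getCode() to the cases above to decide whether to
+   *     // retry, fix the request, or ignore (ALREADY_EXISTS).
+   *   } else {
+   *     AppendRowsResponse.AppendResult result = response.getAppendResult();
+   *   }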
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + @java.lang.Override + public boolean hasError() { + return responseCase_ == 2; + } + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures. The
+   * request can be retried if the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side errors that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + @java.lang.Override + public com.google.rpc.Status getError() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures. The
+   * request can be retried if the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side errors that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + + public static final int UPDATED_SCHEMA_FIELD_NUMBER = 3; + private com.google.cloud.bigquery.storage.v1.TableSchema updatedSchema_; + + /** + * + * + *
+   * If the backend detects a schema update, it is passed back to the user so
+   * that the user can input messages of the new type. It will be empty when
+   * no schema updates have occurred.
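+   *
+   * An illustrative sketch (response is a received AppendRowsResponse):
+   *
+   *   if (response.hasUpdatedSchema()) {
+   *     TableSchema schema = response.getUpdatedSchema();
+   *     // Rebuild the writer's input messages against the new schema.
+   *   }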
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + @java.lang.Override + public boolean hasUpdatedSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If the backend detects a schema update, it is passed back to the user so
+   * that the user can input messages of the new type. It will be empty when
+   * no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema() { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } + + /** + * + * + *
+   * If the backend detects a schema update, it is passed back to the user so
+   * that the user can input messages of the new type. It will be empty when
+   * no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder() { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } + + public static final int ROW_ERRORS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List rowErrors_; + + /** + * + * + *
+   * If a request fails due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info so that the caller can
+   * remove the bad rows and retry the request.
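+   *
+   * An illustrative sketch (response is a received AppendRowsResponse):
+   *
+   *   for (RowError rowError : response.getRowErrorsList()) {
+   *     // Use each RowError to locate and drop the bad row, then retry.
+   *   }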
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public java.util.List getRowErrorsList() { + return rowErrors_; + } + + /** + * + * + *
+   * If a request fails due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public java.util.List + getRowErrorsOrBuilderList() { + return rowErrors_; + } + + /** + * + * + *
+   * If a request fails due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public int getRowErrorsCount() { + return rowErrors_.size(); + } + + /** + * + * + *
+   * If a request fails due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError getRowErrors(int index) { + return rowErrors_.get(index); + } + + /** + * + * + *
+   * If a request fails due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index) { + return rowErrors_.get(index); + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object writeStream_ = ""; + + /** + * + * + *
+   * The target of the append operation. Matches the write_stream in the
+   * corresponding request.
+   * 
+ * + * string write_stream = 5; + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + + /** + * + * + *
+   * The target of the append operation. Matches the write_stream in the
+   * corresponding request.
+   * 
+ * + * string write_stream = 5; + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (responseCase_ == 1) { + output.writeMessage( + 1, (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_); + } + if (responseCase_ == 2) { + output.writeMessage(2, (com.google.rpc.Status) response_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getUpdatedSchema()); + } + for (int i = 0; i < rowErrors_.size(); i++) { + output.writeMessage(4, rowErrors_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, writeStream_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (responseCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_); + } + if (responseCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.rpc.Status) response_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdatedSchema()); + } + for (int i = 0; i < rowErrors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, rowErrors_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, writeStream_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AppendRowsResponse other = + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse) obj; + + if (hasUpdatedSchema() != other.hasUpdatedSchema()) return false; + if (hasUpdatedSchema()) { + if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false; + } + if (!getRowErrorsList().equals(other.getRowErrorsList())) return false; + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (!getResponseCase().equals(other.getResponseCase())) return false; + switch (responseCase_) { + case 1: + if (!getAppendResult().equals(other.getAppendResult())) return false; + break; + case 2: + if (!getError().equals(other.getError())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + 
} + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUpdatedSchema()) { + hash = (37 * hash) + UPDATED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUpdatedSchema().hashCode(); + } + if (getRowErrorsCount() > 0) { + hash = (37 * hash) + ROW_ERRORS_FIELD_NUMBER; + hash = (53 * hash) + getRowErrorsList().hashCode(); + } + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + switch (responseCase_) { + case 1: + hash = (37 * hash) + APPEND_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getAppendResult().hashCode(); + break; + case 2: + hash = (37 * hash) + ERROR_FIELD_NUMBER; + hash = (53 * hash) + getError().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, 
input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `AppendRows`.
+   * 
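+   *
+   * <p>A minimal construction sketch (illustrative only; the stream name below
+   * is a hypothetical placeholder):
+   *
+   * <pre>{@code
+   * AppendRowsResponse response =
+   *     AppendRowsResponse.newBuilder()
+   *         .setWriteStream(
+   *             "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default")
+   *         .build();
+   * }</pre>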
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AppendRowsResponse) + com.google.cloud.bigquery.storage.v1.AppendRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AppendRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getUpdatedSchemaFieldBuilder(); + getRowErrorsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (appendResultBuilder_ != null) { + appendResultBuilder_.clear(); + } + if (errorBuilder_ != null) { + errorBuilder_.clear(); + } + updatedSchema_ = null; + if (updatedSchemaBuilder_ != null) { + updatedSchemaBuilder_.dispose(); + updatedSchemaBuilder_ = null; + } + if (rowErrorsBuilder_ == null) { + rowErrors_ = java.util.Collections.emptyList(); + } else { + rowErrors_ = null; + rowErrorsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + writeStream_ = ""; + responseCase_ = 0; + response_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse build() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.AppendRowsResponse result = + new com.google.cloud.bigquery.storage.v1.AppendRowsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse result) { + if (rowErrorsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + rowErrors_ = 
java.util.Collections.unmodifiableList(rowErrors_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.rowErrors_ = rowErrors_; + } else { + result.rowErrors_ = rowErrorsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.AppendRowsResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.updatedSchema_ = + updatedSchemaBuilder_ == null ? updatedSchema_ : updatedSchemaBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.writeStream_ = writeStream_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse result) { + result.responseCase_ = responseCase_; + result.response_ = this.response_; + if (responseCase_ == 1 && appendResultBuilder_ != null) { + result.response_ = appendResultBuilder_.build(); + } + if (responseCase_ == 2 && errorBuilder_ != null) { + result.response_ = errorBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AppendRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AppendRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1.AppendRowsResponse.getDefaultInstance()) + return this; + if (other.hasUpdatedSchema()) { + mergeUpdatedSchema(other.getUpdatedSchema()); + } + if (rowErrorsBuilder_ == null) { + if (!other.rowErrors_.isEmpty()) { + if (rowErrors_.isEmpty()) { + rowErrors_ = other.rowErrors_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureRowErrorsIsMutable(); + rowErrors_.addAll(other.rowErrors_); + } + onChanged(); + } + } else { + if (!other.rowErrors_.isEmpty()) { + if (rowErrorsBuilder_.isEmpty()) { + rowErrorsBuilder_.dispose(); + rowErrorsBuilder_ = null; + rowErrors_ = other.rowErrors_; + bitField0_ = (bitField0_ & ~0x00000008); + rowErrorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getRowErrorsFieldBuilder() + : null; + } else { + rowErrorsBuilder_.addAllMessages(other.rowErrors_); + } + } + } + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + bitField0_ |= 0x00000010; + onChanged(); + } + switch (other.getResponseCase()) { + case APPEND_RESULT: + { + mergeAppendResult(other.getAppendResult()); + break; + } + case ERROR: + { + mergeError(other.getError()); + break; + } + case RESPONSE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getAppendResultFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage(getErrorFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage(getUpdatedSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.cloud.bigquery.storage.v1.RowError m = + input.readMessage( + com.google.cloud.bigquery.storage.v1.RowError.parser(), extensionRegistry); + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.add(m); + } else { + rowErrorsBuilder_.addMessage(m); + } + break; + } // case 34 + case 42: + { + writeStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public Builder clearResponse() { + responseCase_ = 0; + response_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder> + appendResultBuilder_; + + /** + * + * + *
+     * Result if the append is successful.
+     * 
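+     *
+     * <p>Callers usually dispatch on the oneof case instead of probing each
+     * field individually; a minimal sketch (the handler methods are
+     * hypothetical):
+     *
+     * <pre>{@code
+     * switch (response.getResponseCase()) {
+     *   case APPEND_RESULT:
+     *     handleAppendResult(response.getAppendResult()); // hypothetical helper
+     *     break;
+     *   case ERROR:
+     *     handleError(response.getError()); // hypothetical helper
+     *     break;
+     *   case RESPONSE_NOT_SET:
+     *   default:
+     *     break;
+     * }
+     * }</pre>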
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + @java.lang.Override + public boolean hasAppendResult() { + return responseCase_ == 1; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult getAppendResult() { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } else { + if (responseCase_ == 1) { + return appendResultBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder setAppendResult( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult value) { + if (appendResultBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + appendResultBuilder_.setMessage(value); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder setAppendResult( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder + builderForValue) { + if (appendResultBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + appendResultBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder mergeAppendResult( + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult value) { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1 + && response_ + != com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.newBuilder( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) + response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 1) { + appendResultBuilder_.mergeFrom(value); + } else { + appendResultBuilder_.setMessage(value); + } + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder clearAppendResult() { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + } + appendResultBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder + getAppendResultBuilder() { + return getAppendResultFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder() { + if ((responseCase_ == 1) && (appendResultBuilder_ != null)) { + return appendResultBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder> + getAppendResultFieldBuilder() { + if (appendResultBuilder_ == null) { + if (!(responseCase_ == 1)) { + response_ = + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + appendResultBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult) response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 1; + onChanged(); + return appendResultBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + errorBuilder_; + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
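+     *
+     * <p>A sketch of mapping the returned status code to a retry decision (the
+     * actual retry policy is application-specific):
+     *
+     * <pre>{@code
+     * com.google.rpc.Status status = response.getError();
+     * int code = status.getCode();
+     * // Safe to ignore: the offset was already written by an earlier attempt.
+     * boolean alreadyExists = code == com.google.rpc.Code.ALREADY_EXISTS_VALUE;
+     * // Retriable after addressing or waiting out the failure.
+     * boolean retriable =
+     *     code == com.google.rpc.Code.ABORTED_VALUE
+     *         || code == com.google.rpc.Code.INTERNAL_VALUE;
+     * }</pre>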
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + @java.lang.Override + public boolean hasError() { + return responseCase_ == 2; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + @java.lang.Override + public com.google.rpc.Status getError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } else { + if (responseCase_ == 2) { + return errorBuilder_.getMessage(); + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + errorBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status.Builder builderForValue) { + if (errorBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + errorBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder mergeError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (responseCase_ == 2 && response_ != com.google.rpc.Status.getDefaultInstance()) { + response_ = + com.google.rpc.Status.newBuilder((com.google.rpc.Status) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 2) { + errorBuilder_.mergeFrom(value); + } else { + errorBuilder_.setMessage(value); + } + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder clearError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + } + errorBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public com.google.rpc.Status.Builder getErrorBuilder() { + return getErrorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if ((responseCase_ == 2) && (errorBuilder_ != null)) { + return errorBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + getErrorFieldBuilder() { + if (errorBuilder_ == null) { + if (!(responseCase_ == 2)) { + response_ = com.google.rpc.Status.getDefaultInstance(); + } + errorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>( + (com.google.rpc.Status) response_, getParentForChildren(), isClean()); + response_ = null; + } + responseCase_ = 2; + onChanged(); + return errorBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.TableSchema updatedSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + updatedSchemaBuilder_; + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
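+     *
+     * <p>Illustrative check for a schema change (sketch only):
+     *
+     * <pre>{@code
+     * if (response.hasUpdatedSchema()) {
+     *   TableSchema newSchema = response.getUpdatedSchema();
+     *   // Rebuild the writer's message descriptor so new fields can be populated.
+     * }
+     * }</pre>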
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + public boolean hasUpdatedSchema() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + public com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema() { + if (updatedSchemaBuilder_ == null) { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } else { + return updatedSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder setUpdatedSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (updatedSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updatedSchema_ = value; + } else { + updatedSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder setUpdatedSchema( + com.google.cloud.bigquery.storage.v1.TableSchema.Builder builderForValue) { + if (updatedSchemaBuilder_ == null) { + updatedSchema_ = builderForValue.build(); + } else { + updatedSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder mergeUpdatedSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (updatedSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && updatedSchema_ != null + && updatedSchema_ + != com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance()) { + getUpdatedSchemaBuilder().mergeFrom(value); + } else { + updatedSchema_ = value; + } + } else { + updatedSchemaBuilder_.mergeFrom(value); + } + if (updatedSchema_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public Builder clearUpdatedSchema() { + bitField0_ = (bitField0_ & ~0x00000004); + updatedSchema_ = null; + if (updatedSchemaBuilder_ != null) { + updatedSchemaBuilder_.dispose(); + updatedSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public com.google.cloud.bigquery.storage.v1.TableSchema.Builder getUpdatedSchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getUpdatedSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder() { + if (updatedSchemaBuilder_ != null) { + return updatedSchemaBuilder_.getMessageOrBuilder(); + } else { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : updatedSchema_; + } + } + + /** + * + * + *
+     * If the backend detects a schema update, it is passed to the caller so
+     * that the caller can use it to construct messages of the new type. It will
+     * be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + getUpdatedSchemaFieldBuilder() { + if (updatedSchemaBuilder_ == null) { + updatedSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder>( + getUpdatedSchema(), getParentForChildren(), isClean()); + updatedSchema_ = null; + } + return updatedSchemaBuilder_; + } + + private java.util.List rowErrors_ = + java.util.Collections.emptyList(); + + private void ensureRowErrorsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + rowErrors_ = + new java.util.ArrayList(rowErrors_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.RowError, + com.google.cloud.bigquery.storage.v1.RowError.Builder, + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder> + rowErrorsBuilder_; + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
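+     *
+     * <p>A sketch of collecting the failing row indexes so the caller can drop
+     * those rows and retry; it assumes {@code RowError} exposes the failing
+     * row's index via {@code getIndex()}:
+     *
+     * <pre>{@code
+     * java.util.List<Long> badRowIndexes = new java.util.ArrayList<>();
+     * for (RowError rowError : response.getRowErrorsList()) {
+     *   badRowIndexes.add(rowError.getIndex());
+     * }
+     * }</pre>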
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public java.util.List getRowErrorsList() { + if (rowErrorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(rowErrors_); + } else { + return rowErrorsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public int getRowErrorsCount() { + if (rowErrorsBuilder_ == null) { + return rowErrors_.size(); + } else { + return rowErrorsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError getRowErrors(int index) { + if (rowErrorsBuilder_ == null) { + return rowErrors_.get(index); + } else { + return rowErrorsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder setRowErrors(int index, com.google.cloud.bigquery.storage.v1.RowError value) { + if (rowErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowErrorsIsMutable(); + rowErrors_.set(index, value); + onChanged(); + } else { + rowErrorsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder setRowErrors( + int index, com.google.cloud.bigquery.storage.v1.RowError.Builder builderForValue) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.set(index, builderForValue.build()); + onChanged(); + } else { + rowErrorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors(com.google.cloud.bigquery.storage.v1.RowError value) { + if (rowErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowErrorsIsMutable(); + rowErrors_.add(value); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors(int index, com.google.cloud.bigquery.storage.v1.RowError value) { + if (rowErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowErrorsIsMutable(); + rowErrors_.add(index, value); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors( + com.google.cloud.bigquery.storage.v1.RowError.Builder builderForValue) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.add(builderForValue.build()); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors( + int index, com.google.cloud.bigquery.storage.v1.RowError.Builder builderForValue) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.add(index, builderForValue.build()); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addAllRowErrors( + java.lang.Iterable values) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, rowErrors_); + onChanged(); + } else { + rowErrorsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder clearRowErrors() { + if (rowErrorsBuilder_ == null) { + rowErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + rowErrorsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder removeRowErrors(int index) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.remove(index); + onChanged(); + } else { + rowErrorsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError.Builder getRowErrorsBuilder(int index) { + return getRowErrorsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index) { + if (rowErrorsBuilder_ == null) { + return rowErrors_.get(index); + } else { + return rowErrorsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public java.util.List + getRowErrorsOrBuilderList() { + if (rowErrorsBuilder_ != null) { + return rowErrorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rowErrors_); + } + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError.Builder addRowErrorsBuilder() { + return getRowErrorsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance()); + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError.Builder addRowErrorsBuilder(int index) { + return getRowErrorsFieldBuilder() + .addBuilder(index, com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance()); + } + + /** + * + * + *
+     * If a request fails due to corrupted rows, no rows in the batch are
+     * appended. The API returns row-level error info so that the caller can
+     * remove the bad rows and retry the request.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public java.util.List + getRowErrorsBuilderList() { + return getRowErrorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.RowError, + com.google.cloud.bigquery.storage.v1.RowError.Builder, + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder> + getRowErrorsFieldBuilder() { + if (rowErrorsBuilder_ == null) { + rowErrorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.RowError, + com.google.cloud.bigquery.storage.v1.RowError.Builder, + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder>( + rowErrors_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + rowErrors_ = null; + } + return rowErrorsBuilder_; + } + + private java.lang.Object writeStream_ = ""; + + /** + * + * + *
+     * The target of the append operation. Matches the write_stream in the
+     * corresponding request.
+     * 
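+     *
+     * <p>When several requests share one connection, this field lets the caller
+     * route a response back to the request stream it answers; a minimal sketch
+     * (the handler map is hypothetical):
+     *
+     * <pre>{@code
+     * String target = response.getWriteStream();
+     * handlers.get(target).onResponse(response); // hypothetical per-stream handlers
+     * }</pre>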
+ * + * string write_stream = 5; + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * The target of the append operation. Matches the write_stream in the
+     * corresponding request.
+     * 
+ * + * string write_stream = 5; + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * The target of the append operation. Matches the write_stream in the
+     * corresponding request.
+     * 
+ * + * string write_stream = 5; + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * The target of the append operation. Matches the write_stream in the
+     * corresponding request.
+     * 
+ * + * string write_stream = 5; + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + writeStream_ = getDefaultInstance().getWriteStream(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * The target of the append operation. Matches the write_stream in the
+     * corresponding request.
+     * 
+ * + * string write_stream = 5; + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + writeStream_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AppendRowsResponse) + private static final com.google.cloud.bigquery.storage.v1.AppendRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AppendRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.AppendRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java new file mode 100644 index 000000000000..fcf43e7a99bd --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java @@ -0,0 +1,300 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface AppendRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AppendRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Result if the append is successful.
+   * 
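+   *
+   * <p>An end-to-end handling sketch over this interface (illustrative; the
+   * helper methods are hypothetical):
+   *
+   * <pre>{@code
+   * void onResponse(AppendRowsResponseOrBuilder response) {
+   *   if (response.hasError()) {
+   *     handleFailure(response.getError(), response.getRowErrorsList());
+   *   } else if (response.hasAppendResult()) {
+   *     handleSuccess(response.getAppendResult());
+   *   }
+   * }
+   * }</pre>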
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + boolean hasAppendResult(); + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult getAppendResult(); + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult append_result = 1; + * + */ + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + boolean hasError(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + com.google.rpc.Status getError(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + */ + com.google.rpc.StatusOrBuilder getErrorOrBuilder(); + + /** + * + * + *
+   * If the backend detects a schema update, it is passed to the caller so
+   * that the caller can use it to construct messages of the new type. It will
+   * be empty when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + boolean hasUpdatedSchema(); + + /** + * + * + *
+   * If the backend detects a schema update, it is passed to the user so that
+   * the user can use it to input messages of the new type. It will be empty
+   * when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + com.google.cloud.bigquery.storage.v1.TableSchema getUpdatedSchema(); + + /** + * + * + *
+   * If the backend detects a schema update, it is passed to the user so that
+   * the user can use it to input messages of the new type. It will be empty
+   * when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.TableSchema updated_schema = 3; + */ + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder(); + + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info, so that the caller can
+   * remove the bad rows and retry the request.
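+   *
+   * A minimal sketch of collecting the bad row indexes before a retry
+   * (assumes `RowError.getIndex()` identifies the offending row):
+   *
+   *   java.util.Set<Long> badRows = new java.util.HashSet<>();
+   *   for (RowError rowError : response.getRowErrorsList()) {
+   *     badRows.add(rowError.getIndex());
+   *   }
+   *   // Rebuild the request without the rows in badRows, then append again.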
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + java.util.List getRowErrorsList(); + + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + com.google.cloud.bigquery.storage.v1.RowError getRowErrors(int index); + + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + int getRowErrorsCount(); + + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + java.util.List + getRowErrorsOrBuilderList(); + + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row-level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index); + + /** + * + * + *
+   * The target of the append operation. Matches the write_stream in the
+   * corresponding request.
+   * 
+ * + * string write_stream = 5; + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + + /** + * + * + *
+   * The target of the append operation. Matches the write_stream in the
+   * corresponding request.
+   * 
+ * + * string write_stream = 5; + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + com.google.cloud.bigquery.storage.v1.AppendRowsResponse.ResponseCase getResponseCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java new file mode 100644 index 000000000000..4fdf716dbb98 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class ArrowProto { + private ArrowProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n,google/cloud/bigquery/storage/v1/arrow" + + ".proto\022 google.cloud.bigquery.storage.v1" + + "\"(\n\013ArrowSchema\022\031\n\021serialized_schema\030\001 \001" + + "(\014\"J\n\020ArrowRecordBatch\022\037\n\027serialized_rec" + + "ord_batch\030\001 \001(\014\022\025\n\trow_count\030\002 \001(\003B\002\030\001\"\354" + + "\003\n\031ArrowSerializationOptions\022h\n\022buffer_c" + + "ompression\030\002 \001(\0162L.google.cloud.bigquery" + + ".storage.v1.ArrowSerializationOptions.Co" + + "mpressionCodec\022v\n\031picos_timestamp_precis" + 
+ "ion\030\003 \001(\0162S.google.cloud.bigquery.storag" + + "e.v1.ArrowSerializationOptions.PicosTime" + + "stampPrecision\"H\n\020CompressionCodec\022\033\n\027CO" + + "MPRESSION_UNSPECIFIED\020\000\022\r\n\tLZ4_FRAME\020\001\022\010" + + "\n\004ZSTD\020\002\"\242\001\n\027PicosTimestampPrecision\022)\n%" + + "PICOS_TIMESTAMP_PRECISION_UNSPECIFIED\020\000\022" + + "\036\n\032TIMESTAMP_PRECISION_MICROS\020\001\022\035\n\031TIMES" + + "TAMP_PRECISION_NANOS\020\002\022\035\n\031TIMESTAMP_PREC" + + "ISION_PICOS\020\003B\272\001\n$com.google.cloud.bigqu" + + "ery.storage.v1B\nArrowProtoP\001Z>cloud.goog" + + "le.com/go/bigquery/storage/apiv1/storage" + + "pb;storagepb\252\002 Google.Cloud.BigQuery.Sto" + + "rage.V1\312\002 Google\\Cloud\\BigQuery\\Storage\\" + + "V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_descriptor, + new java.lang.String[] { + "SerializedSchema", + }); + internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_descriptor, + new java.lang.String[] { + "SerializedRecordBatch", "RowCount", + }); + internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor, + new java.lang.String[] { + "BufferCompression", "PicosTimestampPrecision", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java new file mode 100644 index 000000000000..3f80fecfdbc8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java @@ -0,0 +1,661 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Arrow RecordBatch.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ArrowRecordBatch} + */ +public final class ArrowRecordBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ArrowRecordBatch) + ArrowRecordBatchOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowRecordBatch.newBuilder() to construct. + private ArrowRecordBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowRecordBatch() { + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowRecordBatch(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder.class); + } + + public static final int SERIALIZED_RECORD_BATCH_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * IPC-serialized Arrow RecordBatch.
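+   *
+   * A minimal deserialization sketch using the Apache Arrow Java library (an
+   * assumption; any Arrow IPC reader works). `message` is an instance of this
+   * proto message and `allocator` is a caller-provided
+   * org.apache.arrow.memory.BufferAllocator:
+   *
+   *   org.apache.arrow.vector.ipc.message.ArrowRecordBatch batch =
+   *       org.apache.arrow.vector.ipc.message.MessageSerializer.deserializeRecordBatch(
+   *           new org.apache.arrow.vector.ipc.ReadChannel(
+   *               java.nio.channels.Channels.newChannel(
+   *                   message.getSerializedRecordBatch().newInput())),
+   *           allocator);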
+   * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + public static final int ROW_COUNT_FIELD_NUMBER = 2; + private long rowCount_ = 0L; + + /** + * + * + *
+   * [Deprecated] The count of rows in `serialized_record_batch`.
+   * Please use the format-independent ReadRowsResponse.row_count instead.
+   * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.ArrowRecordBatch.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/arrow.proto;l=43 + * @return The rowCount. + */ + @java.lang.Override + @java.lang.Deprecated + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedRecordBatch_.isEmpty()) { + output.writeBytes(1, serializedRecordBatch_); + } + if (rowCount_ != 0L) { + output.writeInt64(2, rowCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedRecordBatch_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedRecordBatch_); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, rowCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ArrowRecordBatch)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch other = + (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) obj; + + if (!getSerializedRecordBatch().equals(other.getSerializedRecordBatch())) return false; + if (getRowCount() != other.getRowCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRecordBatch().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Arrow RecordBatch.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ArrowRecordBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ArrowRecordBatch) + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + rowCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch build() { + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch buildPartial() { + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch result = + new com.google.cloud.bigquery.storage.v1.ArrowRecordBatch(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ArrowRecordBatch result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedRecordBatch_ = serializedRecordBatch_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowCount_ = rowCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ArrowRecordBatch other) { + if (other == com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance()) + return this; + if (other.getSerializedRecordBatch() != com.google.protobuf.ByteString.EMPTY) { + setSerializedRecordBatch(other.getSerializedRecordBatch()); + } + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedRecordBatch_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @param value The serializedRecordBatch to set. + * @return This builder for chaining. + */ + public Builder setSerializedRecordBatch(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedRecordBatch_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRecordBatch() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedRecordBatch_ = getDefaultInstance().getSerializedRecordBatch(); + onChanged(); + return this; + } + + private long rowCount_; + + /** + * + * + *
+     * [Deprecated] The count of rows in `serialized_record_batch`.
+     * Please use the format-independent ReadRowsResponse.row_count instead.
+     * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.ArrowRecordBatch.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/arrow.proto;l=43 + * @return The rowCount. + */ + @java.lang.Override + @java.lang.Deprecated + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+     * [Deprecated] The count of rows in `serialized_record_batch`.
+     * Please use the format-independent ReadRowsResponse.row_count instead.
+     * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.ArrowRecordBatch.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/arrow.proto;l=43 + * @param value The rowCount to set. + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * [Deprecated] The count of rows in `serialized_record_batch`.
+     * Please use the format-independent ReadRowsResponse.row_count instead.
+     * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.ArrowRecordBatch.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/arrow.proto;l=43 + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000002); + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ArrowRecordBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ArrowRecordBatch) + private static final com.google.cloud.bigquery.storage.v1.ArrowRecordBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ArrowRecordBatch(); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowRecordBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java new file mode 100644 index 000000000000..3adab473022f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ArrowRecordBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ArrowRecordBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * IPC-serialized Arrow RecordBatch.
+   * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + com.google.protobuf.ByteString getSerializedRecordBatch(); + + /** + * + * + *
+   * [Deprecated] The count of rows in `serialized_record_batch`.
+   * Please use the format-independent ReadRowsResponse.row_count instead.
+   * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.ArrowRecordBatch.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/arrow.proto;l=43 + * @return The rowCount. + */ + @java.lang.Deprecated + long getRowCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java new file mode 100644 index 000000000000..a603eaef2af6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java @@ -0,0 +1,555 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Arrow schema as specified in
+ * https://arrow.apache.org/docs/python/api/datatypes.html
+ * and serialized to bytes using IPC:
+ * https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+ *
+ * See code samples on how this message can be deserialized.
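+ *
+ * For instance, a minimal sketch using the Apache Arrow Java library (an
+ * assumption; any Arrow IPC reader works), given an `arrowSchema` instance of
+ * this message:
+ *
+ *   org.apache.arrow.vector.types.pojo.Schema schema =
+ *       org.apache.arrow.vector.ipc.message.MessageSerializer.deserializeSchema(
+ *           new org.apache.arrow.vector.ipc.ReadChannel(
+ *               java.nio.channels.Channels.newChannel(
+ *                   arrowSchema.getSerializedSchema().newInput())));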
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ArrowSchema} + */ +public final class ArrowSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ArrowSchema) + ArrowSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowSchema.newBuilder() to construct. + private ArrowSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowSchema() { + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder.class); + } + + public static final int SERIALIZED_SCHEMA_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * IPC-serialized Arrow schema.
+   * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedSchema_.isEmpty()) { + output.writeBytes(1, serializedSchema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedSchema_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ArrowSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ArrowSchema other = + (com.google.cloud.bigquery.storage.v1.ArrowSchema) obj; + + if (!getSerializedSchema().equals(other.getSerializedSchema())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSerializedSchema().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ArrowSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Arrow schema as specified in
+   * https://arrow.apache.org/docs/python/api/datatypes.html
+   * and serialized to bytes using IPC:
+   * https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+   *
+   * See code samples on how this message can be deserialized.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ArrowSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ArrowSchema) + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ArrowSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema build() { + com.google.cloud.bigquery.storage.v1.ArrowSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.ArrowSchema result = + new com.google.cloud.bigquery.storage.v1.ArrowSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ArrowSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedSchema_ = serializedSchema_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ArrowSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ArrowSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ArrowSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance()) + return this; + if (other.getSerializedSchema() != com.google.protobuf.ByteString.EMPTY) { + setSerializedSchema(other.getSerializedSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedSchema_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * IPC-serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + /** + * + * + *
+     * IPC-serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @param value The serializedSchema to set. + * @return This builder for chaining. + */ + public Builder setSerializedSchema(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedSchema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * IPC-serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedSchema() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedSchema_ = getDefaultInstance().getSerializedSchema(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ArrowSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ArrowSchema) + private static final com.google.cloud.bigquery.storage.v1.ArrowSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ArrowSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java new file mode 100644 index 000000000000..537933883d57 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ArrowSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ArrowSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * IPC-serialized Arrow schema.
+   * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + com.google.protobuf.ByteString getSerializedSchema(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java new file mode 100644 index 000000000000..7e5151528bee --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java @@ -0,0 +1,1203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Contains options specific to Arrow Serialization.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ArrowSerializationOptions} + */ +public final class ArrowSerializationOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + ArrowSerializationOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowSerializationOptions.newBuilder() to construct. + private ArrowSerializationOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowSerializationOptions() { + bufferCompression_ = 0; + picosTimestampPrecision_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowSerializationOptions(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.class, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder.class); + } + + /** + * + * + *
+   * Compression codecs supported by Arrow.
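+   *
+   * A minimal sketch of requesting LZ4-compressed buffers; attaching the
+   * options to a read session is up to the caller:
+   *
+   *   ArrowSerializationOptions options =
+   *       ArrowSerializationOptions.newBuilder()
+   *           .setBufferCompression(CompressionCodec.LZ4_FRAME)
+   *           .build();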
+   * 
+ * + * Protobuf enum {@code + * google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec} + */ + public enum CompressionCodec implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * If unspecified, no compression will be used.
+     * 
+ * + * COMPRESSION_UNSPECIFIED = 0; + */ + COMPRESSION_UNSPECIFIED(0), + /** + * + * + *
+     * LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
+     * 
+ * + * LZ4_FRAME = 1; + */ + LZ4_FRAME(1), + /** + * + * + *
+     * Zstandard compression.
+     * 
+ * + * ZSTD = 2; + */ + ZSTD(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * If unspecified, no compression will be used.
+     * 
+ * + * COMPRESSION_UNSPECIFIED = 0; + */ + public static final int COMPRESSION_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
+     * 
+ * + * LZ4_FRAME = 1; + */ + public static final int LZ4_FRAME_VALUE = 1; + + /** + * + * + *
+     * Zstandard compression.
+     * 
+ * + * ZSTD = 2; + */ + public static final int ZSTD_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static CompressionCodec valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static CompressionCodec forNumber(int value) { + switch (value) { + case 0: + return COMPRESSION_UNSPECIFIED; + case 1: + return LZ4_FRAME; + case 2: + return ZSTD; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public CompressionCodec findValueByNumber(int number) { + return CompressionCodec.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final CompressionCodec[] VALUES = values(); + + public static CompressionCodec valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private CompressionCodec(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec) + } + + /** + * + * + *
+   * The precision of the timestamp value in the Arrow message. This precision
+   * will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+   * 
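+   * A minimal sketch of selecting a precision (illustrative only; assumes the
+   * surrounding ReadSession setup is already in place):
+   *
+   *     ArrowSerializationOptions options =
+   *         ArrowSerializationOptions.newBuilder()
+   *             .setPicosTimestampPrecision(
+   *                 ArrowSerializationOptions.PicosTimestampPrecision
+   *                     .TIMESTAMP_PRECISION_NANOS)
+   *             .build();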
+ * + * Protobuf enum {@code + * google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision} + */ + public enum PicosTimestampPrecision implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Unspecified timestamp precision. The default precision is microseconds.
+     * 
+ * + * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0; + */ + PICOS_TIMESTAMP_PRECISION_UNSPECIFIED(0), + /** + * + * + *
+     * Timestamp values returned by the Read API will be truncated to
+     * microsecond-level precision. The value will be encoded as an Arrow
+     * TIMESTAMP type in a 64-bit integer.
+     * 
+ * + * TIMESTAMP_PRECISION_MICROS = 1; + */ + TIMESTAMP_PRECISION_MICROS(1), + /** + * + * + *
+     * Timestamp values returned by the Read API will be truncated to
+     * nanosecond-level precision. The value will be encoded as an Arrow
+     * TIMESTAMP type in a 64-bit integer.
+     * 
+ * + * TIMESTAMP_PRECISION_NANOS = 2; + */ + TIMESTAMP_PRECISION_NANOS(2), + /** + * + * + *
+     * The Read API will return the full-precision picosecond value. The value
+     * will be encoded as a string that conforms to the ISO 8601 format.
+     * 
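+     * For illustration (the exact textual form is an assumption, not taken
+     * from this file): a TIMESTAMP_PICOS column value could come back as
+     * "2024-01-01T00:00:00.123456789012Z" rather than as a 64-bit integer.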
+ * + * TIMESTAMP_PRECISION_PICOS = 3; + */ + TIMESTAMP_PRECISION_PICOS(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Unspecified timestamp precision. The default precision is microseconds.
+     * 
+ * + * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0; + */ + public static final int PICOS_TIMESTAMP_PRECISION_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Timestamp values returned by the Read API will be truncated to
+     * microsecond-level precision. The value will be encoded as an Arrow
+     * TIMESTAMP type in a 64-bit integer.
+     * 
+ * + * TIMESTAMP_PRECISION_MICROS = 1; + */ + public static final int TIMESTAMP_PRECISION_MICROS_VALUE = 1; + + /** + * + * + *
+     * Timestamp values returned by the Read API will be truncated to
+     * nanosecond-level precision. The value will be encoded as an Arrow
+     * TIMESTAMP type in a 64-bit integer.
+     * 
+ * + * TIMESTAMP_PRECISION_NANOS = 2; + */ + public static final int TIMESTAMP_PRECISION_NANOS_VALUE = 2; + + /** + * + * + *
+     * The Read API will return the full-precision picosecond value. The value
+     * will be encoded as a string that conforms to the ISO 8601 format.
+     * 
+ * + * TIMESTAMP_PRECISION_PICOS = 3; + */ + public static final int TIMESTAMP_PRECISION_PICOS_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static PicosTimestampPrecision valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static PicosTimestampPrecision forNumber(int value) { + switch (value) { + case 0: + return PICOS_TIMESTAMP_PRECISION_UNSPECIFIED; + case 1: + return TIMESTAMP_PRECISION_MICROS; + case 2: + return TIMESTAMP_PRECISION_NANOS; + case 3: + return TIMESTAMP_PRECISION_PICOS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public PicosTimestampPrecision findValueByNumber(int number) { + return PicosTimestampPrecision.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final PicosTimestampPrecision[] VALUES = values(); + + public static PicosTimestampPrecision valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private PicosTimestampPrecision(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision) + } + + public static final int BUFFER_COMPRESSION_FIELD_NUMBER = 2; + private int bufferCompression_ = 0; + + /** + * + * + *
+   * The compression codec to use for Arrow buffers in serialized record
+   * batches.
+   * 
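+   * For example (illustrative only), a caller could request LZ4-compressed
+   * Arrow buffers via the builder defined later in this file:
+   *
+   *     ArrowSerializationOptions options =
+   *         ArrowSerializationOptions.newBuilder()
+   *             .setBufferCompression(
+   *                 ArrowSerializationOptions.CompressionCodec.LZ4_FRAME)
+   *             .build();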
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return The enum numeric value on the wire for bufferCompression. + */ + @java.lang.Override + public int getBufferCompressionValue() { + return bufferCompression_; + } + + /** + * + * + *
+   * The compression codec to use for Arrow buffers in serialized record
+   * batches.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return The bufferCompression. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + getBufferCompression() { + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec result = + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec.forNumber( + bufferCompression_); + return result == null + ? com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + .UNRECOGNIZED + : result; + } + + public static final int PICOS_TIMESTAMP_PRECISION_FIELD_NUMBER = 3; + private int picosTimestampPrecision_ = 0; + + /** + * + * + *
+   * Optional. Sets the timestamp precision option. If not set, the default
+   * precision is microseconds.
+   * 
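+   * Note that an unknown wire value surfaces as UNRECOGNIZED rather than
+   * throwing, so callers may want to handle it explicitly (illustrative
+   * sketch; "options" stands in for any ArrowSerializationOptions instance):
+   *
+   *     ArrowSerializationOptions.PicosTimestampPrecision precision =
+   *         options.getPicosTimestampPrecision();
+   *     if (precision
+   *         == ArrowSerializationOptions.PicosTimestampPrecision.UNRECOGNIZED) {
+   *       // Fall back to the raw wire value:
+   *       int wire = options.getPicosTimestampPrecisionValue();
+   *     }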
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return The enum numeric value on the wire for picosTimestampPrecision. + */ + @java.lang.Override + public int getPicosTimestampPrecisionValue() { + return picosTimestampPrecision_; + } + + /** + * + * + *
+   * Optional. Sets the timestamp precision option. If not set, the default
+   * precision is microseconds.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return The picosTimestampPrecision. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + getPicosTimestampPrecision() { + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision result = + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + .forNumber(picosTimestampPrecision_); + return result == null + ? com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + .UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (bufferCompression_ + != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + .COMPRESSION_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, bufferCompression_); + } + if (picosTimestampPrecision_ + != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, picosTimestampPrecision_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (bufferCompression_ + != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + .COMPRESSION_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, bufferCompression_); + } + if (picosTimestampPrecision_ + != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, picosTimestampPrecision_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions other = + (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) obj; + + if (bufferCompression_ != other.bufferCompression_) return false; + if (picosTimestampPrecision_ != other.picosTimestampPrecision_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BUFFER_COMPRESSION_FIELD_NUMBER; + hash = (53 * hash) + bufferCompression_; + hash = (37 * hash) + PICOS_TIMESTAMP_PRECISION_FIELD_NUMBER; + hash = (53 * hash) + picosTimestampPrecision_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + 
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains options specific to Arrow serialization.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ArrowSerializationOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.class, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + bufferCompression_ = 0; + picosTimestampPrecision_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions build() { + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions buildPartial() { + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions result = + new com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.bufferCompression_ = bufferCompression_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.picosTimestampPrecision_ = picosTimestampPrecision_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + 
public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDefaultInstance()) + return this; + if (other.bufferCompression_ != 0) { + setBufferCompressionValue(other.getBufferCompressionValue()); + } + if (other.picosTimestampPrecision_ != 0) { + setPicosTimestampPrecisionValue(other.getPicosTimestampPrecisionValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 16: + { + bufferCompression_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 16 + case 24: + { + picosTimestampPrecision_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int bufferCompression_ = 0; + + /** + * + * + *
+     * The compression codec to use for Arrow buffers in serialized record
+     * batches.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return The enum numeric value on the wire for bufferCompression. + */ + @java.lang.Override + public int getBufferCompressionValue() { + return bufferCompression_; + } + + /** + * + * + *
+     * The compression codec to use for Arrow buffers in serialized record
+     * batches.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @param value The enum numeric value on the wire for bufferCompression to set. + * @return This builder for chaining. + */ + public Builder setBufferCompressionValue(int value) { + bufferCompression_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The compression codec to use for Arrow buffers in serialized record
+     * batches.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return The bufferCompression. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + getBufferCompression() { + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec result = + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec.forNumber( + bufferCompression_); + return result == null + ? com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + .UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * The compression codec to use for Arrow buffers in serialized record
+     * batches.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @param value The bufferCompression to set. + * @return This builder for chaining. + */ + public Builder setBufferCompression( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + bufferCompression_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * The compression codec to use for Arrow buffers in serialized record
+     * batches.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return This builder for chaining. + */ + public Builder clearBufferCompression() { + bitField0_ = (bitField0_ & ~0x00000001); + bufferCompression_ = 0; + onChanged(); + return this; + } + + private int picosTimestampPrecision_ = 0; + + /** + * + * + *
+     * Optional. Sets the timestamp precision option. If not set, the default
+     * precision is microseconds.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return The enum numeric value on the wire for picosTimestampPrecision. + */ + @java.lang.Override + public int getPicosTimestampPrecisionValue() { + return picosTimestampPrecision_; + } + + /** + * + * + *
+     * Optional. Sets the timestamp precision option. If not set, the default
+     * precision is microseconds.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @param value The enum numeric value on the wire for picosTimestampPrecision to set. + * @return This builder for chaining. + */ + public Builder setPicosTimestampPrecisionValue(int value) { + picosTimestampPrecision_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Sets the timestamp precision option. If not set, the default
+     * precision is microseconds.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return The picosTimestampPrecision. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + getPicosTimestampPrecision() { + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + result = + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + .forNumber(picosTimestampPrecision_); + return result == null + ? com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + .UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Optional. Sets the timestamp precision option. If not set, the default
+     * precision is microseconds.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @param value The picosTimestampPrecision to set. + * @return This builder for chaining. + */ + public Builder setPicosTimestampPrecision( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + picosTimestampPrecision_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Sets the timestamp precision option. If not set, the default
+     * precision is microseconds.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return This builder for chaining. + */ + public Builder clearPicosTimestampPrecision() { + bitField0_ = (bitField0_ & ~0x00000002); + picosTimestampPrecision_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + private static final com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions(); + } + + public static com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowSerializationOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java new file mode 100644 index 000000000000..7223ad0e9cac --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java @@ -0,0 +1,92 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ArrowSerializationOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The compression codec to use for Arrow buffers in serialized record
+   * batches.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return The enum numeric value on the wire for bufferCompression. + */ + int getBufferCompressionValue(); + + /** + * + * + *
+   * The compression codec to use for Arrow buffers in serialized record
+   * batches.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec buffer_compression = 2; + * + * + * @return The bufferCompression. + */ + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec + getBufferCompression(); + + /** + * + * + *
+   * Optional. Sets the timestamp precision option. If not set, the default
+   * precision is microseconds.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return The enum numeric value on the wire for picosTimestampPrecision. + */ + int getPicosTimestampPrecisionValue(); + + /** + * + * + *
+   * Optional. Sets the timestamp precision option. If not set, the default
+   * precision is microseconds.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3; + * + * + * @return The picosTimestampPrecision. + */ + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision + getPicosTimestampPrecision(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java new file mode 100644 index 000000000000..fd13a61e8572 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java @@ -0,0 +1,102 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class AvroProto { + private AvroProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AvroSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AvroSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AvroRows_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AvroRows_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n+google/cloud/bigquery/storage/v1/avro." 
+ + "proto\022 google.cloud.bigquery.storage.v1\"" + + "\034\n\nAvroSchema\022\016\n\006schema\030\001 \001(\t\"A\n\010AvroRow" + + "s\022\036\n\026serialized_binary_rows\030\001 \001(\014\022\025\n\trow" + + "_count\030\002 \001(\003B\002\030\001\"\335\002\n\030AvroSerializationOp" + + "tions\022%\n\035enable_display_name_attribute\030\001" + + " \001(\010\022u\n\031picos_timestamp_precision\030\002 \001(\0162" + + "R.google.cloud.bigquery.storage.v1.AvroS" + + "erializationOptions.PicosTimestampPrecis" + + "ion\"\242\001\n\027PicosTimestampPrecision\022)\n%PICOS" + + "_TIMESTAMP_PRECISION_UNSPECIFIED\020\000\022\036\n\032TI" + + "MESTAMP_PRECISION_MICROS\020\001\022\035\n\031TIMESTAMP_" + + "PRECISION_NANOS\020\002\022\035\n\031TIMESTAMP_PRECISION" + + "_PICOS\020\003B\271\001\n$com.google.cloud.bigquery.s" + + "torage.v1B\tAvroProtoP\001Z>cloud.google.com" + + "/go/bigquery/storage/apiv1/storagepb;sto" + + "ragepb\252\002 Google.Cloud.BigQuery.Storage.V" + + "1\312\002 Google\\Cloud\\BigQuery\\Storage\\V1b\006pr" + + "oto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1_AvroSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_AvroSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AvroSchema_descriptor, + new java.lang.String[] { + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1_AvroRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_AvroRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AvroRows_descriptor, + new java.lang.String[] { + "SerializedBinaryRows", "RowCount", + }); + internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor, + new java.lang.String[] { + "EnableDisplayNameAttribute", "PicosTimestampPrecision", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java new file mode 100644 index 000000000000..1b1636c8efcb --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java @@ -0,0 +1,659 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Avro rows.
+ * 
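+ * A minimal consumption sketch (illustrative; "response" is assumed to be a
+ * ReadRowsResponse from a Read API stream):
+ *
+ *     AvroRows rows = response.getAvroRows();
+ *     byte[] payload = rows.getSerializedBinaryRows().toByteArray();
+ *     // "payload" holds a block of Avro-encoded records that match the
+ *     // session's AvroSchema.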
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AvroRows} + */ +public final class AvroRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AvroRows) + AvroRowsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroRows.newBuilder() to construct. + private AvroRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroRows() { + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroRows(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AvroRows.class, + com.google.cloud.bigquery.storage.v1.AvroRows.Builder.class); + } + + public static final int SERIALIZED_BINARY_ROWS_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Binary serialized rows in a block.
+   * 
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + public static final int ROW_COUNT_FIELD_NUMBER = 2; + private long rowCount_ = 0L; + + /** + * + * + *
+   * [Deprecated] The count of rows in the returned block.
+   * Please use the format-independent ReadRowsResponse.row_count instead.
+   * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.AvroRows.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/avro.proto;l=39 + * @return The rowCount. + */ + @java.lang.Override + @java.lang.Deprecated + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedBinaryRows_.isEmpty()) { + output.writeBytes(1, serializedBinaryRows_); + } + if (rowCount_ != 0L) { + output.writeInt64(2, rowCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedBinaryRows_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedBinaryRows_); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, rowCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AvroRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AvroRows other = + (com.google.cloud.bigquery.storage.v1.AvroRows) obj; + + if (!getSerializedBinaryRows().equals(other.getSerializedBinaryRows())) return false; + if (getRowCount() != other.getRowCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_BINARY_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedBinaryRows().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.AvroRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Avro rows.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AvroRows} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AvroRows) + com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AvroRows.class, + com.google.cloud.bigquery.storage.v1.AvroRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AvroRows.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + rowCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRows getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRows build() { + com.google.cloud.bigquery.storage.v1.AvroRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRows buildPartial() { + com.google.cloud.bigquery.storage.v1.AvroRows result = + new com.google.cloud.bigquery.storage.v1.AvroRows(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.AvroRows result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedBinaryRows_ = serializedBinaryRows_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowCount_ = rowCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AvroRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AvroRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AvroRows other) { + if (other == com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance()) return this; + if (other.getSerializedBinaryRows() != com.google.protobuf.ByteString.EMPTY) { + setSerializedBinaryRows(other.getSerializedBinaryRows()); + } + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedBinaryRows_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * 
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * 
+ * + * bytes serialized_binary_rows = 1; + * + * @param value The serializedBinaryRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedBinaryRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedBinaryRows_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * 
+ * + * bytes serialized_binary_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedBinaryRows() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedBinaryRows_ = getDefaultInstance().getSerializedBinaryRows(); + onChanged(); + return this; + } + + private long rowCount_; + + /** + * + * + *
+     * [Deprecated] The count of rows in the returned block.
+     * Please use the format-independent ReadRowsResponse.row_count instead.
+     * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.AvroRows.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/avro.proto;l=39 + * @return The rowCount. + */ + @java.lang.Override + @java.lang.Deprecated + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+     * [Deprecated] The count of rows in the returned block.
+     * Please use the format-independent ReadRowsResponse.row_count instead.
+     * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.AvroRows.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/avro.proto;l=39 + * @param value The rowCount to set. + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * [Deprecated] The count of rows in the returned block.
+     * Please use the format-independent ReadRowsResponse.row_count instead.
+     * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.AvroRows.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/avro.proto;l=39 + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000002); + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AvroRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AvroRows) + private static final com.google.cloud.bigquery.storage.v1.AvroRows DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AvroRows(); + } + + public static com.google.cloud.bigquery.storage.v1.AvroRows getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRows getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java new file mode 100644 index 000000000000..56e6c6080395 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface AvroRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AvroRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Binary serialized rows in a block.
+   * 
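 + * + * Not part of the generated API: a hedged decoding sketch. It assumes the + * Apache Avro library (org.apache.avro) is on the classpath, that + * {@code schema} is the parsed Avro {@code Schema} for the read session, and + * that {@code avroRows} is an instance of this message. + * + *
{@code
+   * BinaryDecoder decoder =
+   *     DecoderFactory.get()
+   *         .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), null);
+   * GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
+   * while (!decoder.isEnd()) {
+   *   GenericRecord row = reader.read(null, decoder); // one record per row
+   * }
+   * }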
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + com.google.protobuf.ByteString getSerializedBinaryRows(); + + /** + * + * + *
+   * [Deprecated] The count of rows in the returned block.
+   * Please use the format-independent ReadRowsResponse.row_count instead.
+   * 
+ * + * int64 row_count = 2 [deprecated = true]; + * + * @deprecated google.cloud.bigquery.storage.v1.AvroRows.row_count is deprecated. See + * google/cloud/bigquery/storage/v1/avro.proto;l=39 + * @return The rowCount. + */ + @java.lang.Deprecated + long getRowCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java new file mode 100644 index 000000000000..bbf13a0d40f1 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java @@ -0,0 +1,641 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Avro schema.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AvroSchema} + */ +public final class AvroSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AvroSchema) + AvroSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroSchema.newBuilder() to construct. + private AvroSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroSchema() { + schema_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AvroSchema.class, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder.class); + } + + public static final int SCHEMA_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object schema_ = ""; + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * 
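 + * + * Not part of the generated API: a hedged sketch of parsing this JSON string + * with the Apache Avro library, assuming org.apache.avro is on the classpath + * and {@code avroSchema} is an instance of this message. + * + *
{@code
+   * Schema parsed = new Schema.Parser().parse(avroSchema.getSchema());
+   * }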
+ * + * string schema = 1; + * + * @return The schema. + */ + @java.lang.Override + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } + } + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * 
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(schema_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, schema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(schema_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, schema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AvroSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AvroSchema other = + (com.google.cloud.bigquery.storage.v1.AvroSchema) obj; + + if (!getSchema().equals(other.getSchema())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.AvroSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Avro schema.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AvroSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AvroSchema) + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AvroSchema.class, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AvroSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + schema_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema build() { + com.google.cloud.bigquery.storage.v1.AvroSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.AvroSchema result = + new com.google.cloud.bigquery.storage.v1.AvroSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.AvroSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.schema_ = schema_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AvroSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AvroSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AvroSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance()) + return this; + if (!other.getSchema().isEmpty()) { + schema_ = other.schema_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + schema_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object schema_ = ""; + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * 
+ * + * string schema = 1; + * + * @return The schema. + */ + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * 
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * 
+ * + * string schema = 1; + * + * @param value The schema to set. + * @return This builder for chaining. + */ + public Builder setSchema(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * 
+ * + * string schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSchema() { + schema_ = getDefaultInstance().getSchema(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * 
+ * + * string schema = 1; + * + * @param value The bytes for schema to set. + * @return This builder for chaining. + */ + public Builder setSchemaBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + schema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AvroSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AvroSchema) + private static final com.google.cloud.bigquery.storage.v1.AvroSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AvroSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java new file mode 100644 index 000000000000..b8bfa619d93b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface AvroSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AvroSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * 
+ * + * string schema = 1; + * + * @return The schema. + */ + java.lang.String getSchema(); + + /** + * + * + *
 + * JSON-serialized schema, as described at 
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * 
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + com.google.protobuf.ByteString getSchemaBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java new file mode 100644 index 000000000000..8f82f91f75ae --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java @@ -0,0 +1,976 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
 + * Contains options specific to Avro serialization. 
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AvroSerializationOptions} + */ +public final class AvroSerializationOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.AvroSerializationOptions) + AvroSerializationOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroSerializationOptions.newBuilder() to construct. + private AvroSerializationOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroSerializationOptions() { + picosTimestampPrecision_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroSerializationOptions(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.class, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder.class); + } + + /** + * + * + *
+   * The precision of the timestamp value in the Avro message. This precision
+   * will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+   * 
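 + * + * A small sketch using only methods generated below: {@code forNumber} maps a + * numeric wire value back to an enum constant (and returns null for unknown + * values). + * + *
{@code
+   * PicosTimestampPrecision precision = PicosTimestampPrecision.forNumber(2);
+   * // precision == TIMESTAMP_PRECISION_NANOS
+   * }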
+ * + * Protobuf enum {@code + * google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision} + */ + public enum PicosTimestampPrecision implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Unspecified timestamp precision. The default precision is microseconds.
+     * 
+ * + * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0; + */ + PICOS_TIMESTAMP_PRECISION_UNSPECIFIED(0), + /** + * + * + *
 + * Timestamp values returned by the Read API will be truncated to + * microsecond-level precision. The value will be encoded as the Avro + * TIMESTAMP type in a 64-bit integer. 
+     * 
+ * + * TIMESTAMP_PRECISION_MICROS = 1; + */ + TIMESTAMP_PRECISION_MICROS(1), + /** + * + * + *
 + * Timestamp values returned by the Read API will be truncated to + * nanosecond-level precision. The value will be encoded as the Avro + * TIMESTAMP type in a 64-bit integer. 
+     * 
+ * + * TIMESTAMP_PRECISION_NANOS = 2; + */ + TIMESTAMP_PRECISION_NANOS(2), + /** + * + * + *
 + * The Read API will return the full-precision picosecond value. The value + * will be encoded as a string that conforms to the ISO 8601 format. 
+     * 
+ * + * TIMESTAMP_PRECISION_PICOS = 3; + */ + TIMESTAMP_PRECISION_PICOS(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Unspecified timestamp precision. The default precision is microseconds.
+     * 
+ * + * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0; + */ + public static final int PICOS_TIMESTAMP_PRECISION_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
 + * Timestamp values returned by the Read API will be truncated to + * microsecond-level precision. The value will be encoded as the Avro + * TIMESTAMP type in a 64-bit integer. 
+     * 
+ * + * TIMESTAMP_PRECISION_MICROS = 1; + */ + public static final int TIMESTAMP_PRECISION_MICROS_VALUE = 1; + + /** + * + * + *
 + * Timestamp values returned by the Read API will be truncated to + * nanosecond-level precision. The value will be encoded as the Avro + * TIMESTAMP type in a 64-bit integer. 
+     * 
+ * + * TIMESTAMP_PRECISION_NANOS = 2; + */ + public static final int TIMESTAMP_PRECISION_NANOS_VALUE = 2; + + /** + * + * + *
 + * The Read API will return the full-precision picosecond value. The value + * will be encoded as a string that conforms to the ISO 8601 format. 
+     * 
+ * + * TIMESTAMP_PRECISION_PICOS = 3; + */ + public static final int TIMESTAMP_PRECISION_PICOS_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static PicosTimestampPrecision valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static PicosTimestampPrecision forNumber(int value) { + switch (value) { + case 0: + return PICOS_TIMESTAMP_PRECISION_UNSPECIFIED; + case 1: + return TIMESTAMP_PRECISION_MICROS; + case 2: + return TIMESTAMP_PRECISION_NANOS; + case 3: + return TIMESTAMP_PRECISION_PICOS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public PicosTimestampPrecision findValueByNumber(int number) { + return PicosTimestampPrecision.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final PicosTimestampPrecision[] VALUES = values(); + + public static PicosTimestampPrecision valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private PicosTimestampPrecision(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision) + } + + public static final int ENABLE_DISPLAY_NAME_ATTRIBUTE_FIELD_NUMBER = 1; + private boolean enableDisplayNameAttribute_ = false; + + /** + * + * + *
 + * Enable the displayName attribute in the Avro schema. + * + * The Avro specification requires field names to be alphanumeric. By + * default, when column names do not conform to these requirements + * (e.g. non-ASCII Unicode codepoints) and Avro is requested as an output + * format, the CreateReadSession call will fail. + * + * Setting this field to true populates Avro field names with a placeholder + * value and adds a "displayName" attribute, holding the original column + * name, to every Avro field. 
+   * 
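 + * + * A hedged usage sketch built only from builder methods generated in this + * file; the surrounding read-session setup is omitted. + * + *
{@code
+   * AvroSerializationOptions options =
+   *     AvroSerializationOptions.newBuilder()
+   *         .setEnableDisplayNameAttribute(true)
+   *         .setPicosTimestampPrecision(
+   *             AvroSerializationOptions.PicosTimestampPrecision.TIMESTAMP_PRECISION_NANOS)
+   *         .build();
+   * }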
+ * + * bool enable_display_name_attribute = 1; + * + * @return The enableDisplayNameAttribute. + */ + @java.lang.Override + public boolean getEnableDisplayNameAttribute() { + return enableDisplayNameAttribute_; + } + + public static final int PICOS_TIMESTAMP_PRECISION_FIELD_NUMBER = 2; + private int picosTimestampPrecision_ = 0; + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return The enum numeric value on the wire for picosTimestampPrecision. + */ + @java.lang.Override + public int getPicosTimestampPrecisionValue() { + return picosTimestampPrecision_; + } + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return The picosTimestampPrecision. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + getPicosTimestampPrecision() { + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision result = + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + .forNumber(picosTimestampPrecision_); + return result == null + ? com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + .UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (enableDisplayNameAttribute_ != false) { + output.writeBool(1, enableDisplayNameAttribute_); + } + if (picosTimestampPrecision_ + != com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, picosTimestampPrecision_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (enableDisplayNameAttribute_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enableDisplayNameAttribute_); + } + if (picosTimestampPrecision_ + != com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, picosTimestampPrecision_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.AvroSerializationOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions other = + (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) obj; + + if (getEnableDisplayNameAttribute() != other.getEnableDisplayNameAttribute()) return false; + if (picosTimestampPrecision_ != other.picosTimestampPrecision_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENABLE_DISPLAY_NAME_ATTRIBUTE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableDisplayNameAttribute()); + hash = (37 * hash) + PICOS_TIMESTAMP_PRECISION_FIELD_NUMBER; + hash = (53 * hash) + picosTimestampPrecision_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); 
+ } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
 + * Contains options specific to Avro serialization. 
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.AvroSerializationOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.AvroSerializationOptions) + com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.class, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + enableDisplayNameAttribute_ = false; + picosTimestampPrecision_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions build() { + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions buildPartial() { + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions result = + new com.google.cloud.bigquery.storage.v1.AvroSerializationOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.enableDisplayNameAttribute_ = enableDisplayNameAttribute_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.picosTimestampPrecision_ = picosTimestampPrecision_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + 
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AvroSerializationOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance()) + return this; + if (other.getEnableDisplayNameAttribute() != false) { + setEnableDisplayNameAttribute(other.getEnableDisplayNameAttribute()); + } + if (other.picosTimestampPrecision_ != 0) { + setPicosTimestampPrecisionValue(other.getPicosTimestampPrecisionValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + enableDisplayNameAttribute_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + picosTimestampPrecision_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean enableDisplayNameAttribute_; + + /** + * + * + *
 + * Enable the displayName attribute in the Avro schema. + * + * The Avro specification requires field names to be alphanumeric. By + * default, when column names do not conform to these requirements + * (e.g. non-ASCII Unicode codepoints) and Avro is requested as an output + * format, the CreateReadSession call will fail. + * + * Setting this field to true populates Avro field names with a placeholder + * value and adds a "displayName" attribute, holding the original column + * name, to every Avro field. 
+     * 
+ * + * bool enable_display_name_attribute = 1; + * + * @return The enableDisplayNameAttribute. + */ + @java.lang.Override + public boolean getEnableDisplayNameAttribute() { + return enableDisplayNameAttribute_; + } + + /** + * + * + *
 + * Enable the displayName attribute in the Avro schema. + * + * The Avro specification requires field names to be alphanumeric. By + * default, when column names do not conform to these requirements + * (e.g. non-ASCII Unicode codepoints) and Avro is requested as an output + * format, the CreateReadSession call will fail. + * + * Setting this field to true populates Avro field names with a placeholder + * value and adds a "displayName" attribute, holding the original column + * name, to every Avro field. 
+     * 
+ * + * bool enable_display_name_attribute = 1; + * + * @param value The enableDisplayNameAttribute to set. + * @return This builder for chaining. + */ + public Builder setEnableDisplayNameAttribute(boolean value) { + + enableDisplayNameAttribute_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
 + * Enable the displayName attribute in the Avro schema. + * + * The Avro specification requires field names to be alphanumeric. By + * default, when column names do not conform to these requirements + * (e.g. non-ASCII Unicode codepoints) and Avro is requested as an output + * format, the CreateReadSession call will fail. + * + * Setting this field to true populates Avro field names with a placeholder + * value and adds a "displayName" attribute, holding the original column + * name, to every Avro field. 
+     * 
+ * + * bool enable_display_name_attribute = 1; + * + * @return This builder for chaining. + */ + public Builder clearEnableDisplayNameAttribute() { + bitField0_ = (bitField0_ & ~0x00000001); + enableDisplayNameAttribute_ = false; + onChanged(); + return this; + } + + private int picosTimestampPrecision_ = 0; + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return The enum numeric value on the wire for picosTimestampPrecision. + */ + @java.lang.Override + public int getPicosTimestampPrecisionValue() { + return picosTimestampPrecision_; + } + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @param value The enum numeric value on the wire for picosTimestampPrecision to set. + * @return This builder for chaining. + */ + public Builder setPicosTimestampPrecisionValue(int value) { + picosTimestampPrecision_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return The picosTimestampPrecision. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + getPicosTimestampPrecision() { + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision result = + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + .forNumber(picosTimestampPrecision_); + return result == null + ? com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + .UNRECOGNIZED + : result; + } + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @param value The picosTimestampPrecision to set. + * @return This builder for chaining. + */ + public Builder setPicosTimestampPrecision( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + picosTimestampPrecision_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return This builder for chaining. + */ + public Builder clearPicosTimestampPrecision() { + bitField0_ = (bitField0_ & ~0x00000002); + picosTimestampPrecision_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.AvroSerializationOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.AvroSerializationOptions) + private static final com.google.cloud.bigquery.storage.v1.AvroSerializationOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.AvroSerializationOptions(); + } + + public static com.google.cloud.bigquery.storage.v1.AvroSerializationOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroSerializationOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java new file mode 100644 index 000000000000..71ed37c86944 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java @@ -0,0 +1,81 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface AvroSerializationOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.AvroSerializationOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
 + * Enable the displayName attribute in the Avro schema. + * + * The Avro specification requires field names to be alphanumeric. By + * default, when column names do not conform to these requirements + * (e.g. non-ASCII Unicode codepoints) and Avro is requested as an output + * format, the CreateReadSession call will fail. + * + * Setting this field to true populates Avro field names with a placeholder + * value and adds a "displayName" attribute, holding the original column + * name, to every Avro field. 
+   * 
+ * + * bool enable_display_name_attribute = 1; + * + * @return The enableDisplayNameAttribute. + */ + boolean getEnableDisplayNameAttribute(); + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return The enum numeric value on the wire for picosTimestampPrecision. + */ + int getPicosTimestampPrecisionValue(); + + /** + * + * + *
 + * Optional. Sets the timestamp precision option. If not set, the default + * precision is microseconds. 
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2; + * + * + * @return The picosTimestampPrecision. + */ + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision + getPicosTimestampPrecision(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java new file mode 100644 index 000000000000..5e63c2c7a7cc --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java @@ -0,0 +1,958 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `BatchCommitWriteStreams`.
+ * 
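+ *
+ * A minimal construction sketch (editorial illustration, not generated
+ * documentation); the project, dataset, table, and stream names below are
+ * placeholders:
+ *
+ *   BatchCommitWriteStreamsRequest request =
+ *       BatchCommitWriteStreamsRequest.newBuilder()
+ *           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+ *           .addWriteStreams(
+ *               "projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream")
+ *           .build();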
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest} + */ +public final class BatchCommitWriteStreamsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + BatchCommitWriteStreamsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCommitWriteStreamsRequest.newBuilder() to construct. + private BatchCommitWriteStreamsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsRequest() { + parent_ = ""; + writeStreams_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAMS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList writeStreams_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + return writeStreams_; + } + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < writeStreams_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, writeStreams_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + { + int dataSize = 0; + for (int i = 0; i < writeStreams_.size(); i++) { + dataSize += computeStringSizeNoTag(writeStreams_.getRaw(i)); + } + size += dataSize; + size += 1 * getWriteStreamsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest other = + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getWriteStreamsList().equals(other.getWriteStreamsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getWriteStreamsCount() > 0) { + hash = (37 * hash) + WRITE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getWriteStreamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + writeStreams_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest build() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest result = + new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + writeStreams_.makeImmutable(); + result.writeStreams_ = writeStreams_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.writeStreams_.isEmpty()) { + if (writeStreams_.isEmpty()) { + writeStreams_ = other.writeStreams_; + bitField0_ |= 0x00000002; + } else { + ensureWriteStreamsIsMutable(); + writeStreams_.addAll(other.writeStreams_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureWriteStreamsIsMutable(); + writeStreams_.add(s); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList writeStreams_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureWriteStreamsIsMutable() { + if (!writeStreams_.isModifiable()) { + writeStreams_ = new com.google.protobuf.LazyStringArrayList(writeStreams_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + writeStreams_.makeImmutable(); + return writeStreams_; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The writeStreams to set. + * @return This builder for chaining. + */ + public Builder setWriteStreams(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreams(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addAllWriteStreams(java.lang.Iterable values) { + ensureWriteStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, writeStreams_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteStreams() { + writeStreams_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreamsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + private static final com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java new file mode 100644 index 000000000000..b51638a03da4 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java @@ -0,0 +1,112 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface BatchCommitWriteStreamsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + java.util.List getWriteStreamsList(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + int getWriteStreamsCount(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + java.lang.String getWriteStreams(int index); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + com.google.protobuf.ByteString getWriteStreamsBytes(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java new file mode 100644 index 000000000000..b7a757aeb1cb --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java @@ -0,0 +1,1388 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `BatchCommitWriteStreams`.
+ * 
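+ *
+ * A minimal handling sketch (editorial illustration, not generated
+ * documentation); because the commit is atomic, a missing commit_time means
+ * no stream was committed and the per-stream errors should be inspected:
+ *
+ *   if (response.hasCommitTime()) {
+ *     // All streams were committed at response.getCommitTime().
+ *   } else {
+ *     for (StorageError error : response.getStreamErrorsList()) {
+ *       // Handle or log each stream-level error.
+ *     }
+ *   }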
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse} + */ +public final class BatchCommitWriteStreamsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + BatchCommitWriteStreamsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCommitWriteStreamsResponse.newBuilder() to construct. + private BatchCommitWriteStreamsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsResponse() { + streamErrors_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.Builder.class); + } + + private int bitField0_; + public static final int COMMIT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTime_; + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note:** if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note:** if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note:** if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + public static final int STREAM_ERRORS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List streamErrors_; + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors will
+   * be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and zero streams are committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public java.util.List getStreamErrorsList() { + return streamErrors_; + } + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors will
+   * be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and zero streams are committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public java.util.List + getStreamErrorsOrBuilderList() { + return streamErrors_; + } + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors will
+   * be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and zero streams are committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public int getStreamErrorsCount() { + return streamErrors_.size(); + } + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors will
+   * be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and zero streams are committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError getStreamErrors(int index) { + return streamErrors_.get(index); + } + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors will
+   * be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and zero streams are committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index) { + return streamErrors_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommitTime()); + } + for (int i = 0; i < streamErrors_.size(); i++) { + output.writeMessage(2, streamErrors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTime()); + } + for (int i = 0; i < streamErrors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, streamErrors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse other = + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) obj; + + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (!getStreamErrorsList().equals(other.getStreamErrorsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (getStreamErrorsCount() > 0) { + hash = (37 * hash) + STREAM_ERRORS_FIELD_NUMBER; + hash = (53 * hash) + getStreamErrorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.class, + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCommitTimeFieldBuilder(); + getStreamErrorsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + if (streamErrorsBuilder_ == null) { + streamErrors_ = java.util.Collections.emptyList(); + } else { + streamErrors_ = null; + streamErrorsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse build() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse result = + new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse result) { + if (streamErrorsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + streamErrors_ = 
java.util.Collections.unmodifiableList(streamErrors_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.streamErrors_ = streamErrors_; + } else { + result.streamErrors_ = streamErrorsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commitTime_ = commitTimeBuilder_ == null ? commitTime_ : commitTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + .getDefaultInstance()) return this; + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (streamErrorsBuilder_ == null) { + if (!other.streamErrors_.isEmpty()) { + if (streamErrors_.isEmpty()) { + streamErrors_ = other.streamErrors_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStreamErrorsIsMutable(); + streamErrors_.addAll(other.streamErrors_); + } + onChanged(); + } + } else { + if (!other.streamErrors_.isEmpty()) { + if (streamErrorsBuilder_.isEmpty()) { + streamErrorsBuilder_.dispose(); + streamErrorsBuilder_ = null; + streamErrors_ = other.streamErrors_; + bitField0_ = (bitField0_ & ~0x00000002); + streamErrorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getStreamErrorsFieldBuilder() + : null; + } else { + streamErrorsBuilder_.addAllMessages(other.streamErrors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getCommitTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1.StorageError m = + input.readMessage( + com.google.cloud.bigquery.storage.v1.StorageError.parser(), + extensionRegistry); + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(m); + } else { + streamErrorsBuilder_.addMessage(m); + } + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + } else { + commitTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commitTime_ != null + && commitTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimeBuilder().mergeFrom(value); + } else { + commitTime_ = value; + } + } else { + commitTimeBuilder_.mergeFrom(value); + } + if (commitTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder clearCommitTime() { + bitField0_ = (bitField0_ & ~0x00000001); + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * This field will only exist when there are no stream errors.
+     * **Note**: if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private java.util.List streamErrors_ = + java.util.Collections.emptyList(); + + private void ensureStreamErrorsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + streamErrors_ = + new java.util.ArrayList( + streamErrors_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StorageError, + com.google.cloud.bigquery.storage.v1.StorageError.Builder, + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder> + streamErrorsBuilder_; + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
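+     * A minimal error-handling sketch (the {@code response} variable is
+     * hypothetical; the accessors used are defined on this message):
+     *
+     *   if (response.getStreamErrorsCount() > 0) {
+     *     // Atomic commit: nothing was committed; report each failing stream.
+     *     for (StorageError error : response.getStreamErrorsList()) {
+     *       System.err.println(error.getEntity() + ": " + error.getErrorMessage());
+     *     }
+     *   }
+     *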
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public java.util.List getStreamErrorsList() { + if (streamErrorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streamErrors_); + } else { + return streamErrorsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public int getStreamErrorsCount() { + if (streamErrorsBuilder_ == null) { + return streamErrors_.size(); + } else { + return streamErrorsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError getStreamErrors(int index) { + if (streamErrorsBuilder_ == null) { + return streamErrors_.get(index); + } else { + return streamErrorsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder setStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.set(index, value); + onChanged(); + } else { + streamErrorsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder setStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.set(index, builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors(com.google.cloud.bigquery.storage.v1.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.add(value); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.add(index, value); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + com.google.cloud.bigquery.storage.v1.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + int index, com.google.cloud.bigquery.storage.v1.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(index, builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder addAllStreamErrors( + java.lang.Iterable values) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streamErrors_); + onChanged(); + } else { + streamErrorsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder clearStreamErrors() { + if (streamErrorsBuilder_ == null) { + streamErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + streamErrorsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public Builder removeStreamErrors(int index) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.remove(index); + onChanged(); + } else { + streamErrorsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError.Builder getStreamErrorsBuilder( + int index) { + return getStreamErrorsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index) { + if (streamErrorsBuilder_ == null) { + return streamErrors_.get(index); + } else { + return streamErrorsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsOrBuilderList() { + if (streamErrorsBuilder_ != null) { + return streamErrorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streamErrors_); + } + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError.Builder addStreamErrorsBuilder() { + return getStreamErrorsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance()); + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1.StorageError.Builder addStreamErrorsBuilder( + int index) { + return getStreamErrorsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance()); + } + + /** + * + * + *
+     * Stream-level error if the commit failed. Only streams with an error
+     * will be in the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are
+     * committed due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsBuilderList() { + return getStreamErrorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StorageError, + com.google.cloud.bigquery.storage.v1.StorageError.Builder, + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder> + getStreamErrorsFieldBuilder() { + if (streamErrorsBuilder_ == null) { + streamErrorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StorageError, + com.google.cloud.bigquery.storage.v1.StorageError.Builder, + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder>( + streamErrors_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + streamErrors_ = null; + } + return streamErrorsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + private static final com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java new file mode 100644 index 000000000000..21da18b0f90f --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java @@ -0,0 +1,145 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface BatchCommitWriteStreamsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field will only exist when there are no stream errors.
+   * **Note**: if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with an error
+   * will be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are
+   * committed due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + java.util.List getStreamErrorsList(); + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with an error
+   * will be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are
+   * committed due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + com.google.cloud.bigquery.storage.v1.StorageError getStreamErrors(int index); + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with an error
+   * will be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are
+   * committed due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + int getStreamErrorsCount(); + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with an error
+   * will be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are
+   * committed due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + java.util.List + getStreamErrorsOrBuilderList(); + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with an error
+   * will be in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are
+   * committed due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.StorageError stream_errors = 2; + */ + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder getStreamErrorsOrBuilder(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java new file mode 100644 index 000000000000..6517fefbd2a5 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java @@ -0,0 +1,1236 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `CreateReadSession`.
+ * 
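+ * A minimal construction sketch (the project and table names are
+ * hypothetical; {@code ReadSession} and {@code DataFormat} come from this
+ * same package):
+ *
+ *   ReadSession session =
+ *       ReadSession.newBuilder()
+ *           .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+ *           .setDataFormat(DataFormat.ARROW)
+ *           .build();
+ *   CreateReadSessionRequest request =
+ *       CreateReadSessionRequest.newBuilder()
+ *           .setParent("projects/my-project")
+ *           .setReadSession(session)
+ *           .build();
+ *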
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.CreateReadSessionRequest} + */ +public final class CreateReadSessionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.CreateReadSessionRequest) + CreateReadSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateReadSessionRequest.newBuilder() to construct. + private CreateReadSessionRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateReadSessionRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateReadSessionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int READ_SESSION_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.ReadSession readSession_; + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + @java.lang.Override + public boolean hasReadSession() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession getReadSession() { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance() + : readSession_; + } + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder getReadSessionOrBuilder() { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance() + : readSession_; + } + + public static final int MAX_STREAM_COUNT_FIELD_NUMBER = 3; + private int maxStreamCount_ = 0; + + /** + * + * + *
+   * Maximum initial number of streams. If unset or zero, the server will
+   * choose an appropriate number of streams so as to produce reasonable
+   * throughput. Must be non-negative. The number of streams may be lower
+   * than the requested number, depending on the amount of parallelism that
+   * is reasonable for the table. There is a default system max limit of
+   * 1,000.
+   *
+   * This must be greater than or equal to preferred_min_stream_count.
+   * Typically, clients should either leave this unset to let the system
+   * determine an upper bound OR set this to the maximum number of "units of
+   * work" the client can gracefully handle.
+   * 
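+   * For example, a client with a fixed-size worker pool might cap the
+   * initial stream count at its pool size (hypothetical sketch;
+   * {@code requestBuilder} and {@code workerPoolSize} are assumed
+   * client-side names):
+   *
+   *   requestBuilder.setMaxStreamCount(workerPoolSize);
+   *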
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + @java.lang.Override + public int getMaxStreamCount() { + return maxStreamCount_; + } + + public static final int PREFERRED_MIN_STREAM_COUNT_FIELD_NUMBER = 4; + private int preferredMinStreamCount_ = 0; + + /** + * + * + *
+   * The minimum preferred stream count. This parameter can be used to inform
+   * the service that there is a desired lower bound on the number of streams.
+   * This is typically a target parallelism of the client (e.g., a Spark
+   * cluster with N workers would set this to a low multiple of N to ensure
+   * good cluster utilization).
+   *
+   * The system will make a best effort to provide at least this number of
+   * streams, but in some cases might provide fewer.
+   * 
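+   * For example, a Spark-like client with {@code numWorkers} executors
+   * (a hypothetical client-side value) might request:
+   *
+   *   requestBuilder.setPreferredMinStreamCount(2 * numWorkers);
+   *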
+ * + * int32 preferred_min_stream_count = 4; + * + * @return The preferredMinStreamCount. + */ + @java.lang.Override + public int getPreferredMinStreamCount() { + return preferredMinStreamCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getReadSession()); + } + if (maxStreamCount_ != 0) { + output.writeInt32(3, maxStreamCount_); + } + if (preferredMinStreamCount_ != 0) { + output.writeInt32(4, preferredMinStreamCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getReadSession()); + } + if (maxStreamCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, maxStreamCount_); + } + if (preferredMinStreamCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, preferredMinStreamCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest other = + (com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasReadSession() != other.hasReadSession()) return false; + if (hasReadSession()) { + if (!getReadSession().equals(other.getReadSession())) return false; + } + if (getMaxStreamCount() != other.getMaxStreamCount()) return false; + if (getPreferredMinStreamCount() != other.getPreferredMinStreamCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasReadSession()) { + hash = (37 * hash) + READ_SESSION_FIELD_NUMBER; + hash = (53 * hash) + getReadSession().hashCode(); + } + hash = (37 * hash) + MAX_STREAM_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getMaxStreamCount(); + hash = (37 * hash) + PREFERRED_MIN_STREAM_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getPreferredMinStreamCount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + java.nio.ByteBuffer data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `CreateReadSession`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.CreateReadSessionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.CreateReadSessionRequest) + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getReadSessionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + readSession_ = null; + if (readSessionBuilder_ != null) { + readSessionBuilder_.dispose(); + readSessionBuilder_ = null; + } + maxStreamCount_ = 0; + preferredMinStreamCount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest build() { + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest result = + new com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.readSession_ = + readSessionBuilder_ == null ? 
readSession_ : readSessionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.maxStreamCount_ = maxStreamCount_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.preferredMinStreamCount_ = preferredMinStreamCount_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasReadSession()) { + mergeReadSession(other.getReadSession()); + } + if (other.getMaxStreamCount() != 0) { + setMaxStreamCount(other.getMaxStreamCount()); + } + if (other.getPreferredMinStreamCount() != 0) { + setPreferredMinStreamCount(other.getPreferredMinStreamCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getReadSessionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + maxStreamCount_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + preferredMinStreamCount_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1.ReadSession readSession_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession, + com.google.cloud.bigquery.storage.v1.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder> + readSessionBuilder_; + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + public boolean hasReadSession() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + public com.google.cloud.bigquery.storage.v1.ReadSession getReadSession() { + if (readSessionBuilder_ == null) { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance() + : readSession_; + } else { + return readSessionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadSession(com.google.cloud.bigquery.storage.v1.ReadSession value) { + if (readSessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readSession_ = value; + } else { + readSessionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadSession( + com.google.cloud.bigquery.storage.v1.ReadSession.Builder builderForValue) { + if (readSessionBuilder_ == null) { + readSession_ = builderForValue.build(); + } else { + readSessionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReadSession(com.google.cloud.bigquery.storage.v1.ReadSession value) { + if (readSessionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && readSession_ != null + && readSession_ + != com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance()) { + getReadSessionBuilder().mergeFrom(value); + } else { + readSession_ = value; + } + } else { + readSessionBuilder_.mergeFrom(value); + } + if (readSession_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReadSession() { + bitField0_ = (bitField0_ & ~0x00000002); + readSession_ = null; + if (readSessionBuilder_ != null) { + readSessionBuilder_.dispose(); + readSessionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.Builder getReadSessionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getReadSessionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder getReadSessionOrBuilder() { + if (readSessionBuilder_ != null) { + return readSessionBuilder_.getMessageOrBuilder(); + } else { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance() + : readSession_; + } + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession, + com.google.cloud.bigquery.storage.v1.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder> + getReadSessionFieldBuilder() { + if (readSessionBuilder_ == null) { + readSessionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession, + com.google.cloud.bigquery.storage.v1.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder>( + getReadSession(), getParentForChildren(), isClean()); + readSession_ = null; + } + return readSessionBuilder_; + } + + private int maxStreamCount_; + + /** + * + * + *
+     * Maximum initial number of streams. If unset or zero, the server will
+     * choose an appropriate number of streams so as to produce reasonable
+     * throughput. Must be non-negative. The number of streams may be lower
+     * than the requested number, depending on the amount of parallelism that
+     * is reasonable for the table. There is a default system max limit of
+     * 1,000.
+     *
+     * This must be greater than or equal to preferred_min_stream_count.
+     * Typically, clients should either leave this unset to let the system
+     * determine an upper bound OR set this to the maximum number of "units of
+     * work" the client can gracefully handle.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + @java.lang.Override + public int getMaxStreamCount() { + return maxStreamCount_; + } + + /** + * + * + *
+     * Maximum initial number of streams. If unset or zero, the server will
+     * choose an appropriate number of streams so as to produce reasonable
+     * throughput. Must be non-negative. The number of streams may be lower
+     * than the requested number, depending on the amount of parallelism that
+     * is reasonable for the table. There is a default system max limit of
+     * 1,000.
+     *
+     * This must be greater than or equal to preferred_min_stream_count.
+     * Typically, clients should either leave this unset to let the system
+     * determine an upper bound OR set this to the maximum number of "units of
+     * work" the client can gracefully handle.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @param value The maxStreamCount to set. + * @return This builder for chaining. + */ + public Builder setMaxStreamCount(int value) { + + maxStreamCount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Maximum initial number of streams. If unset or zero, the server will
+     * choose an appropriate number of streams so as to produce reasonable
+     * throughput. Must be non-negative. The number of streams may be lower
+     * than the requested number, depending on the amount of parallelism that
+     * is reasonable for the table. There is a default system max limit of
+     * 1,000.
+     *
+     * This must be greater than or equal to preferred_min_stream_count.
+     * Typically, clients should either leave this unset to let the system
+     * determine an upper bound OR set this to the maximum number of "units of
+     * work" the client can gracefully handle.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @return This builder for chaining. + */ + public Builder clearMaxStreamCount() { + bitField0_ = (bitField0_ & ~0x00000004); + maxStreamCount_ = 0; + onChanged(); + return this; + } + + private int preferredMinStreamCount_; + + /** + * + * + *
+     * The minimum preferred stream count. This parameter can be used to
+     * inform the service that there is a desired lower bound on the number
+     * of streams. This is typically a target parallelism of the client
+     * (e.g., a Spark cluster with N workers would set this to a low multiple
+     * of N to ensure good cluster utilization).
+     *
+     * The system will make a best effort to provide at least this number of
+     * streams, but in some cases might provide fewer.
+     * 
+ * + * int32 preferred_min_stream_count = 4; + * + * @return The preferredMinStreamCount. + */ + @java.lang.Override + public int getPreferredMinStreamCount() { + return preferredMinStreamCount_; + } + + /** + * + * + *
+     * The minimum preferred stream count. This parameter can be used to
+     * inform the service that there is a desired lower bound on the number
+     * of streams. This is typically a target parallelism of the client
+     * (e.g., a Spark cluster with N workers would set this to a low multiple
+     * of N to ensure good cluster utilization).
+     *
+     * The system will make a best effort to provide at least this number of
+     * streams, but in some cases might provide fewer.
+     * 
+ * + * int32 preferred_min_stream_count = 4; + * + * @param value The preferredMinStreamCount to set. + * @return This builder for chaining. + */ + public Builder setPreferredMinStreamCount(int value) { + + preferredMinStreamCount_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * The minimum preferred stream count. This parameter can be used to
+     * inform the service that there is a desired lower bound on the number
+     * of streams. This is typically a target parallelism of the client
+     * (e.g., a Spark cluster with N workers would set this to a low multiple
+     * of N to ensure good cluster utilization).
+     *
+     * The system will make a best effort to provide at least this number of
+     * streams, but in some cases might provide fewer.
+     * 
+ * + * int32 preferred_min_stream_count = 4; + * + * @return This builder for chaining. + */ + public Builder clearPreferredMinStreamCount() { + bitField0_ = (bitField0_ & ~0x00000008); + preferredMinStreamCount_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.CreateReadSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.CreateReadSessionRequest) + private static final com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateReadSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java new file mode 100644 index 000000000000..488e3162916e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java @@ -0,0 +1,143 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface CreateReadSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.CreateReadSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + boolean hasReadSession(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + com.google.cloud.bigquery.storage.v1.ReadSession getReadSession(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder getReadSessionOrBuilder(); + + /** + * + * + *
+   * Max initial number of streams. If unset or zero, the server will
+   * provide a number of streams so as to produce reasonable throughput. Must
+   * be non-negative. The number of streams may be lower than the requested
+   * number, depending on the amount of parallelism that is reasonable for the
+   * table. There is a default system max limit of 1,000.
+   *
+   * This must be greater than or equal to preferred_min_stream_count.
+   * Typically, clients should either leave this unset to let the system
+   * determine an upper bound, or set it to the maximum number of "units of
+   * work" it can gracefully handle.
+   * 
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + int getMaxStreamCount(); + + /** + * + * + *
+   * The minimum preferred stream count. This parameter can be used to inform
+   * the service that there is a desired lower bound on the number of streams.
+   * This is typically a target parallelism of the client (e.g. a Spark
+   * cluster with N workers would set this to a low multiple of N to ensure
+   * good cluster utilization).
+   *
+   * The system will make a best effort to provide at least this number of
+   * streams, but in some cases might provide fewer.
+   * 
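+   *
+   * On the read side, both hints are plain ints that read as 0 when unset
+   * (the `request` variable below is an assumed, already-received
+   * CreateReadSessionRequest):
+   *
+   *     int max = request.getMaxStreamCount();          // 0 = let the server choose
+   *     int min = request.getPreferredMinStreamCount(); // 0 = no lower-bound hint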
+ * + * int32 preferred_min_stream_count = 4; + * + * @return The preferredMinStreamCount. + */ + int getPreferredMinStreamCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java new file mode 100644 index 000000000000..62c469311377 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java @@ -0,0 +1,978 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `CreateWriteStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.CreateWriteStreamRequest} + */ +public final class CreateWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + CreateWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateWriteStreamRequest.newBuilder() to construct. + private CreateWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateWriteStreamRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateWriteStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
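+   *
+   * For example, a minimal request could be built as follows (the ids are
+   * placeholders, and real callers would populate the write stream rather
+   * than use the default instance):
+   *
+   *     CreateWriteStreamRequest request =
+   *         CreateWriteStreamRequest.newBuilder()
+   *             .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+   *             .setWriteStream(WriteStream.getDefaultInstance())
+   *             .build();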
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.WriteStream writeStream_; + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + @java.lang.Override + public boolean hasWriteStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream() { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder getWriteStreamOrBuilder() { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getWriteStream()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getWriteStream()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasWriteStream() != other.hasWriteStream()) return false; + if (hasWriteStream()) { + if (!getWriteStream().equals(other.getWriteStream())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasWriteStream()) { + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `CreateWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.CreateWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getWriteStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + writeStream_ = null; + if (writeStreamBuilder_ != null) { + writeStreamBuilder_.dispose(); + writeStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.writeStream_ = + writeStreamBuilder_ == null ? 
writeStream_ : writeStreamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasWriteStream()) { + mergeWriteStream(other.getWriteStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getWriteStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1.WriteStream writeStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.WriteStream, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder> + writeStreamBuilder_; + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + public boolean hasWriteStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream() { + if (writeStreamBuilder_ == null) { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } else { + return writeStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream(com.google.cloud.bigquery.storage.v1.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + } else { + writeStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream( + com.google.cloud.bigquery.storage.v1.WriteStream.Builder builderForValue) { + if (writeStreamBuilder_ == null) { + writeStream_ = builderForValue.build(); + } else { + writeStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeWriteStream(com.google.cloud.bigquery.storage.v1.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && writeStream_ != null + && writeStream_ + != com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance()) { + getWriteStreamBuilder().mergeFrom(value); + } else { + writeStream_ = value; + } + } else { + writeStreamBuilder_.mergeFrom(value); + } + if (writeStream_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearWriteStream() { + bitField0_ = (bitField0_ & ~0x00000002); + writeStream_ = null; + if (writeStreamBuilder_ != null) { + writeStreamBuilder_.dispose(); + writeStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1.WriteStream.Builder getWriteStreamBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getWriteStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder getWriteStreamOrBuilder() { + if (writeStreamBuilder_ != null) { + return writeStreamBuilder_.getMessageOrBuilder(); + } else { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance() + : writeStream_; + } + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.WriteStream, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder> + getWriteStreamFieldBuilder() { + if (writeStreamBuilder_ == null) { + writeStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.WriteStream, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder>( + getWriteStream(), getParentForChildren(), isClean()); + writeStream_ = null; + } + return writeStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java new file mode 100644 index 000000000000..1ae0342cd714 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface CreateWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.CreateWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + boolean hasWriteStream(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder getWriteStreamOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java new file mode 100644 index 000000000000..44c65cab60bd --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java @@ -0,0 +1,183 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Data format for input or output data.
+ * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.DataFormat} + */ +public enum DataFormat implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+   * Data format is unspecified.
+   * 
+ * + * DATA_FORMAT_UNSPECIFIED = 0; + */ + DATA_FORMAT_UNSPECIFIED(0), + /** + * + * + *
+   * Avro is a standard open source row-based file format.
+   * See https://avro.apache.org/ for more details.
+   * 
+ * + * AVRO = 1; + */ + AVRO(1), + /** + * + * + *
+   * Arrow is a standard open source column-based message format.
+   * See https://arrow.apache.org/ for more details.
+   * 
+ * + * ARROW = 2; + */ + ARROW(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+   * Data format is unspecified.
+   * 
+ * + * DATA_FORMAT_UNSPECIFIED = 0; + */ + public static final int DATA_FORMAT_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+   * Avro is a standard open source row-based file format.
+   * See https://avro.apache.org/ for more details.
+   * 
+ * + * AVRO = 1; + */ + public static final int AVRO_VALUE = 1; + + /** + * + * + *
+   * Arrow is a standard open source column-based message format.
+   * See https://arrow.apache.org/ for more details.
+   * 
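+   *
+   * A numeric wire value is converted back to the enum with forNumber(int),
+   * which returns null for unknown values; valueOf(int) is deprecated in its
+   * favor. A short sketch:
+   *
+   *     DataFormat format = DataFormat.forNumber(2); // ARROW
+   *     if (format == null) {
+   *       // the wire value did not match any known entry
+   *     }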
+ * + * ARROW = 2; + */ + public static final int ARROW_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DataFormat valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static DataFormat forNumber(int value) { + switch (value) { + case 0: + return DATA_FORMAT_UNSPECIFIED; + case 1: + return AVRO; + case 2: + return ARROW; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DataFormat findValueByNumber(int number) { + return DataFormat.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto.getDescriptor().getEnumTypes().get(0); + } + + private static final DataFormat[] VALUES = values(); + + public static DataFormat valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DataFormat(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.DataFormat) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java new file mode 100644 index 000000000000..0aabf054aa4a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java @@ -0,0 +1,663 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for invoking `FinalizeWriteStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest} + */ +public final class FinalizeWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + FinalizeWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FinalizeWriteStreamRequest.newBuilder() to construct. + private FinalizeWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
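+   *
+   * For example (all ids below are placeholders):
+   *
+   *     FinalizeWriteStreamRequest request =
+   *         FinalizeWriteStreamRequest.newBuilder()
+   *             .setName(
+   *                 "projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream")
+   *             .build();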
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + byte[] data) 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for invoking `FinalizeWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java new file mode 100644 index 000000000000..2ae4bd37c8c3 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface FinalizeWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java new file mode 100644 index 000000000000..d34445e8205d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java @@ -0,0 +1,549 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `FinalizeWriteStream`.
+ * 
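+ * A minimal usage sketch (illustrative only, not protoc output): the service's
+ * final row count can be read straight off this response, assuming a client and
+ * stream name obtained elsewhere:
+ * <pre>{@code
+ * FinalizeWriteStreamResponse response = client.finalizeWriteStream(streamName);
+ * long finalizedRows = response.getRowCount();
+ * }</pre>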
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse} + */ +public final class FinalizeWriteStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + FinalizeWriteStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FinalizeWriteStreamResponse.newBuilder() to construct. + private FinalizeWriteStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.Builder.class); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 1; + private long rowCount_ = 0L; + + /** + * + * + *
+   * Number of rows in the finalized stream.
+   * 
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (rowCount_ != 0L) { + output.writeInt64(1, rowCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, rowCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse other = + (com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `FinalizeWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + rowCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse build() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse result = + new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.rowCount_ = rowCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + 
return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse.getDefaultInstance()) + return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long rowCount_; + + /** + * + * + *
+     * Number of rows in the finalized stream.
+     * 
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+     * Number of rows in the finalized stream.
+     * 
+ * + * int64 row_count = 1; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Number of rows in the finalized stream.
+     * 
+ * + * int64 row_count = 1; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000001); + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + private static final com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java new file mode 100644 index 000000000000..c7b5797fb338 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface FinalizeWriteStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Number of rows in the finalized stream.
+   * 
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + long getRowCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java new file mode 100644 index 000000000000..d3fbae6f53bd --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java @@ -0,0 +1,944 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `FlushRows`.
+ * 
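+ * A minimal usage sketch (illustrative only, not protoc output): flushing all
+ * rows up to a given offset on a buffered stream, with placeholder resource
+ * names and an assumed BigQueryWriteClient:
+ * <pre>{@code
+ * try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+ *   FlushRowsRequest request =
+ *       FlushRowsRequest.newBuilder()
+ *           .setWriteStream(
+ *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]")
+ *                   .toString())
+ *           .setOffset(com.google.protobuf.Int64Value.of(99L))
+ *           .build();
+ *   FlushRowsResponse response = client.flushRows(request);
+ * }
+ * }</pre>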
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsRequest} + */ +public final class FlushRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FlushRowsRequest) + FlushRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FlushRowsRequest.newBuilder() to construct. + private FlushRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FlushRowsRequest() { + writeStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FlushRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.class, + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.Builder.class); + } + + private int bitField0_; + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object writeStream_ = ""; + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   * 
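+   * Illustrative note (not protoc output): the Int64Value wrapper lets callers
+   * distinguish "offset unset" from an explicit offset of 0, so presence should
+   * be checked before reading, e.g.:
+   * <pre>{@code
+   * if (request.hasOffset()) {
+   *   long flushUpTo = request.getOffset().getValue();
+   * }
+   * }</pre>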
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getOffset()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FlushRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FlushRowsRequest other = + (com.google.cloud.bigquery.storage.v1.FlushRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `FlushRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FlushRowsRequest) + com.google.cloud.bigquery.storage.v1.FlushRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.class, + com.google.cloud.bigquery.storage.v1.FlushRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FlushRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOffsetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writeStream_ = ""; + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FlushRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest build() { + com.google.cloud.bigquery.storage.v1.FlushRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.FlushRowsRequest result = + new com.google.cloud.bigquery.storage.v1.FlushRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.FlushRowsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writeStream_ = writeStream_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offsetBuilder_ == null ? 
offset_ : offsetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FlushRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FlushRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.FlushRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.FlushRowsRequest.getDefaultInstance()) + return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + writeStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getOffsetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object writeStream_ = ""; + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + writeStream_ = getDefaultInstance().getWriteStream(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + } else { + offsetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && offset_ != null + && offset_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getOffsetBuilder().mergeFrom(value); + } else { + offset_ = value; + } + } else { + offsetBuilder_.mergeFrom(value); + } + if (offset_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FlushRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FlushRowsRequest) + private static final com.google.cloud.bigquery.storage.v1.FlushRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FlushRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FlushRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java new file mode 100644 index 000000000000..21472e5d7277 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface FlushRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FlushRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java new file mode 100644 index 000000000000..dc8e02a219c7 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java @@ -0,0 +1,542 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `FlushRows`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsResponse} + */ +public final class FlushRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.FlushRowsResponse) + FlushRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FlushRowsResponse.newBuilder() to construct. + private FlushRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FlushRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FlushRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.class, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.Builder.class); + } + + public static final int OFFSET_FIELD_NUMBER = 1; + private long offset_ = 0L; + + /** + * + * + *
+   * The rows before this offset (including this offset) are flushed.
+   * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (offset_ != 0L) { + output.writeInt64(1, offset_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, offset_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.FlushRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.FlushRowsResponse other = + (com.google.cloud.bigquery.storage.v1.FlushRowsResponse) obj; + + if (getOffset() != other.getOffset()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `FlushRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.FlushRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.FlushRowsResponse) + com.google.cloud.bigquery.storage.v1.FlushRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.class, + com.google.cloud.bigquery.storage.v1.FlushRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.FlushRowsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + offset_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.FlushRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse build() { + com.google.cloud.bigquery.storage.v1.FlushRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.FlushRowsResponse result = + new com.google.cloud.bigquery.storage.v1.FlushRowsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.FlushRowsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.offset_ = offset_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.FlushRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.FlushRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.FlushRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1.FlushRowsResponse.getDefaultInstance()) + return this; + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + offset_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long offset_; + + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+ * + * int64 offset = 1; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + + offset_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+ * + * int64 offset = 1; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000001); + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.FlushRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.FlushRowsResponse) + private static final com.google.cloud.bigquery.storage.v1.FlushRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.FlushRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.FlushRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FlushRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.FlushRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java new file mode 100644 index 000000000000..64a56f97cead --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface FlushRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.FlushRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The rows before this offset (including this offset) are flushed.
+   * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + long getOffset(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java new file mode 100644 index 000000000000..ddc78090e3b9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java @@ -0,0 +1,826 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `GetWriteStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.GetWriteStreamRequest} + */ +public final class GetWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + GetWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use GetWriteStreamRequest.newBuilder() to construct. + private GetWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetWriteStreamRequest() { + name_ = ""; + view_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetWriteStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VIEW_FIELD_NUMBER = 3; + private int view_ = 0; + + /** + * + * + *
+   * Indicates whether to get a full or partial view of the WriteStream. If
+   * not set, the view returned will be basic.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return The enum numeric value on the wire for view. + */ + @java.lang.Override + public int getViewValue() { + return view_; + } + + /** + * + * + *
+   * Indicates whether to get a full or partial view of the WriteStream. If
+   * not set, the view returned will be basic.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return The view. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStreamView getView() { + com.google.cloud.bigquery.storage.v1.WriteStreamView result = + com.google.cloud.bigquery.storage.v1.WriteStreamView.forNumber(view_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStreamView.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (view_ + != com.google.cloud.bigquery.storage.v1.WriteStreamView.WRITE_STREAM_VIEW_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, view_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (view_ + != com.google.cloud.bigquery.storage.v1.WriteStreamView.WRITE_STREAM_VIEW_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, view_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (view_ != other.view_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VIEW_FIELD_NUMBER; + hash = (53 * hash) + view_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `GetWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.GetWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + view_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.view_ = view_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.view_ != 0) { + setViewValue(other.getViewValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 24: + { + view_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int view_ = 0; + + /** + * + * + *
+     * Indicates whether to get a full or partial view of the WriteStream. If
+     * not set, the view returned will be basic.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return The enum numeric value on the wire for view. + */ + @java.lang.Override + public int getViewValue() { + return view_; + } + + /** + * + * + *
+     * Indicates whether to get a full or partial view of the WriteStream. If
+     * not set, the view returned will be basic.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @param value The enum numeric value on the wire for view to set. + * @return This builder for chaining. + */ + public Builder setViewValue(int value) { + view_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Indicates whether to get a full or partial view of the WriteStream. If
+     * not set, the view returned will be basic.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return The view. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStreamView getView() { + com.google.cloud.bigquery.storage.v1.WriteStreamView result = + com.google.cloud.bigquery.storage.v1.WriteStreamView.forNumber(view_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStreamView.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Indicates whether to get a full or partial view of the WriteStream. If
+     * not set, the view returned will be basic.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @param value The view to set. + * @return This builder for chaining. + */ + public Builder setView(com.google.cloud.bigquery.storage.v1.WriteStreamView value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + view_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Indicates whether to get a full or partial view of the WriteStream. If
+     * not set, the view returned will be basic.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return This builder for chaining. + */ + public Builder clearView() { + bitField0_ = (bitField0_ & ~0x00000002); + view_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java new file mode 100644 index 000000000000..9c3a062f974c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java @@ -0,0 +1,86 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface GetWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.GetWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Indicates whether to get a full or partial view of the WriteStream. If
+   * not set, the view returned will be basic.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return The enum numeric value on the wire for view. + */ + int getViewValue(); + + /** + * + * + *
+   * Indicates whether to get a full or partial view of the WriteStream. If
+   * not set, the view returned will be basic.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.WriteStreamView view = 3; + * + * @return The view. + */ + com.google.cloud.bigquery.storage.v1.WriteStreamView getView(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java new file mode 100644 index 000000000000..e8e97004fed9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class ProjectName implements ResourceName { + private static final PathTemplate PROJECT = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private volatile Map fieldValuesMap; + private final String project; + + @Deprecated + protected ProjectName() { + project = null; + } + + private ProjectName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ProjectName of(String project) { + return newBuilder().setProject(project).build(); + } + + public static String format(String project) { + return newBuilder().setProject(project).build().toString(); + } + + public static ProjectName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT.validatedMatch( + formattedString, "ProjectName.parse: formattedString not in valid format"); + return of(matchMap.get("project")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ProjectName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + 
ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT.instantiate("project", project); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ProjectName that = ((ProjectName) o); + return Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for projects/{project}. */ + public static class Builder { + private String project; + + protected Builder() {} + + public String getProject() { + return project; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + private Builder(ProjectName projectName) { + this.project = projectName.project; + } + + public ProjectName build() { + return new ProjectName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java new file mode 100644 index 000000000000..eda883cf998b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java @@ -0,0 +1,86 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class ProtoBufProto { + private ProtoBufProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n/google/cloud/bigquery/storage/v1/proto" + + "buf.proto\022 google.cloud.bigquery.storage" + + ".v1\032 google/protobuf/descriptor.proto\"I\n" + + "\013ProtoSchema\022:\n\020proto_descriptor\030\001 \001(\0132 " + + ".google.protobuf.DescriptorProto\"$\n\tProt" + + "oRows\022\027\n\017serialized_rows\030\001 \003(\014B\275\001\n$com.g" + + "oogle.cloud.bigquery.storage.v1B\rProtoBu" + + "fProtoP\001Z>cloud.google.com/go/bigquery/s" + + "torage/apiv1/storagepb;storagepb\252\002 Googl" + + "e.Cloud.BigQuery.Storage.V1\312\002 Google\\Clo" + + "ud\\BigQuery\\Storage\\V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.DescriptorProtos.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor, + new java.lang.String[] { + "ProtoDescriptor", + }); + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor, + new java.lang.String[] { + "SerializedRows", + }); + com.google.protobuf.DescriptorProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java new file mode 100644 index 000000000000..28b0b47af40f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java @@ -0,0 +1,695 @@ +/* + * Copyright 2026 Google LLC + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoRows} */ +public final class ProtoRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ProtoRows) + ProtoRowsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ProtoRows.newBuilder() to construct. + private ProtoRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoRows() { + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoRows(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoRows.class, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder.class); + } + + public static final int SERIALIZED_ROWS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.ProtobufList + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
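+   *
+   * Illustrative usage, assuming {@code RowProto.Row} is a placeholder for a
+   * message type generated from a schema of your own:
+   *
+   * <pre>{@code
+   * // RowProto.Row stands in for any generated message with a toByteString().
+   * RowProto.Row row = RowProto.Row.newBuilder().setId(1L).build();
+   * ProtoRows rows =
+   *     ProtoRows.newBuilder().addSerializedRows(row.toByteString()).build();
+   * }</pre>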
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + @java.lang.Override + public java.util.List getSerializedRowsList() { + return serializedRows_; + } + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < serializedRows_.size(); i++) { + output.writeBytes(1, serializedRows_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < serializedRows_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(serializedRows_.get(i)); + } + size += dataSize; + size += 1 * getSerializedRowsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ProtoRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ProtoRows other = + (com.google.cloud.bigquery.storage.v1.ProtoRows) obj; + + if (!getSerializedRowsList().equals(other.getSerializedRowsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSerializedRowsCount() > 0) { + hash = (37 * hash) + SERIALIZED_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRowsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ProtoRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoRows} */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ProtoRows) + com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoRows.class, + com.google.cloud.bigquery.storage.v1.ProtoRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ProtoRows.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows build() { + com.google.cloud.bigquery.storage.v1.ProtoRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows buildPartial() { + com.google.cloud.bigquery.storage.v1.ProtoRows result = + new com.google.cloud.bigquery.storage.v1.ProtoRows(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ProtoRows result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + serializedRows_.makeImmutable(); + result.serializedRows_ = serializedRows_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, 
java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ProtoRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ProtoRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ProtoRows other) { + if (other == com.google.cloud.bigquery.storage.v1.ProtoRows.getDefaultInstance()) return this; + if (!other.serializedRows_.isEmpty()) { + if (serializedRows_.isEmpty()) { + serializedRows_ = other.serializedRows_; + serializedRows_.makeImmutable(); + bitField0_ |= 0x00000001; + } else { + ensureSerializedRowsIsMutable(); + serializedRows_.addAll(other.serializedRows_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.ByteString v = input.readBytes(); + ensureSerializedRowsIsMutable(); + serializedRows_.add(v); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Internal.ProtobufList + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + + private void ensureSerializedRowsIsMutable() { + if (!serializedRows_.isModifiable()) { + serializedRows_ = makeMutableCopy(serializedRows_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + public java.util.List getSerializedRowsList() { + serializedRows_.makeImmutable(); + return serializedRows_; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index to set the value at. + * @param value The serializedRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedRows(int index, com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
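+     *
+     * Illustrative usage, assuming {@code message} is an instance of any
+     * message type generated from your own schema:
+     *
+     * <pre>{@code
+     * // message is a placeholder com.google.protobuf.Message instance.
+     * builder.addSerializedRows(message.toByteString());
+     * }</pre>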
+ * + * repeated bytes serialized_rows = 1; + * + * @param value The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addSerializedRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
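+     *
+     * Illustrative usage for batching, assuming {@code messages} is a list of
+     * instances of a message type generated from your own schema:
+     *
+     * <pre>{@code
+     * // messages is a placeholder List<? extends com.google.protobuf.Message>.
+     * java.util.List<com.google.protobuf.ByteString> serialized =
+     *     new java.util.ArrayList<>();
+     * for (com.google.protobuf.Message m : messages) {
+     *   serialized.add(m.toByteString());
+     * }
+     * builder.addAllSerializedRows(serialized);
+     * }</pre>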
+ * + * repeated bytes serialized_rows = 1; + * + * @param values The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addAllSerializedRows( + java.lang.Iterable values) { + ensureSerializedRowsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, serializedRows_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRows() { + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ProtoRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ProtoRows) + private static final com.google.cloud.bigquery.storage.v1.ProtoRows DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ProtoRows(); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoRows getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoRows getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java new file mode 100644 index 000000000000..6537a430f57a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java @@ -0,0 +1,75 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ProtoRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ProtoRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + java.util.List getSerializedRowsList(); + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + int getSerializedRowsCount(); + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + com.google.protobuf.ByteString getSerializedRows(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java new file mode 100644 index 000000000000..868f2503c82d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java @@ -0,0 +1,849 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * ProtoSchema describes the schema of the serialized protocol buffer data rows.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoSchema} + */ +public final class ProtoSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ProtoSchema) + ProtoSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ProtoSchema.newBuilder() to construct. + private ProtoSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoSchema() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder.class); + } + + private int bitField0_; + public static final int PROTO_DESCRIPTOR_FIELD_NUMBER = 1; + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to only use nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   *
+   * For additional information on how proto types and values map onto BigQuery
+   * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
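+   *
+   * Illustrative sketch, assuming {@code RowProto.Row} is a placeholder for a
+   * top-level message generated from your own schema; descriptors that rely on
+   * nested or imported types must first be normalized into a single
+   * self-contained descriptor:
+   *
+   * <pre>{@code
+   * // RowProto.Row stands in for a message generated from your schema.
+   * ProtoSchema schema =
+   *     ProtoSchema.newBuilder()
+   *         .setProtoDescriptor(RowProto.Row.getDescriptor().toProto())
+   *         .build();
+   * }</pre>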
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptor() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to only use nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   *
+   * For additional information on how proto types and values map onto BigQuery
+   * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + @java.lang.Override + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to only use nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   *
+   * For additional information on how proto types and values map onto BigQuery
+   * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + @java.lang.Override + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getProtoDescriptor()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getProtoDescriptor()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ProtoSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ProtoSchema other = + (com.google.cloud.bigquery.storage.v1.ProtoSchema) obj; + + if (hasProtoDescriptor() != other.hasProtoDescriptor()) return false; + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().equals(other.getProtoDescriptor())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProtoDescriptor()) { + hash = (37 * hash) + PROTO_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptor().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ProtoSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * ProtoSchema describes the schema of the serialized protocol buffer data rows.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ProtoSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ProtoSchema) + com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ProtoSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getProtoDescriptorFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + protoDescriptor_ = null; + if (protoDescriptorBuilder_ != null) { + protoDescriptorBuilder_.dispose(); + protoDescriptorBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1_ProtoSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema build() { + com.google.cloud.bigquery.storage.v1.ProtoSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.ProtoSchema result = + new com.google.cloud.bigquery.storage.v1.ProtoSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ProtoSchema result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.protoDescriptor_ = + protoDescriptorBuilder_ == null ? 
protoDescriptor_ : protoDescriptorBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ProtoSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ProtoSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ProtoSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.ProtoSchema.getDefaultInstance()) + return this; + if (other.hasProtoDescriptor()) { + mergeProtoDescriptor(other.getProtoDescriptor()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getProtoDescriptorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + protoDescriptorBuilder_; + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + public boolean hasProtoDescriptor() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + if (protoDescriptorBuilder_ == null) { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } else { + return protoDescriptorBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
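+     *
+     * Illustrative usage, assuming {@code RowProto.Row} is a placeholder for a
+     * self-contained message type generated from your own schema:
+     *
+     * <pre>{@code
+     * // RowProto.Row stands in for a message generated from your schema.
+     * builder.setProtoDescriptor(RowProto.Row.getDescriptor().toProto());
+     * }</pre>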
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor(com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptor_ = value; + } else { + protoDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder builderForValue) { + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = builderForValue.build(); + } else { + protoDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder mergeProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && protoDescriptor_ != null + && protoDescriptor_ + != com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance()) { + getProtoDescriptorBuilder().mergeFrom(value); + } else { + protoDescriptor_ = value; + } + } else { + protoDescriptorBuilder_.mergeFrom(value); + } + if (protoDescriptor_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder clearProtoDescriptor() { + bitField0_ = (bitField0_ & ~0x00000001); + protoDescriptor_ = null; + if (protoDescriptorBuilder_ != null) { + protoDescriptorBuilder_.dispose(); + protoDescriptorBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto.Builder + getProtoDescriptorBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getProtoDescriptorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + if (protoDescriptorBuilder_ != null) { + return protoDescriptorBuilder_.getMessageOrBuilder(); + } else { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + } + + /** + * + * + *
+     * Descriptor for input message.  The provided descriptor must be
+     * self-contained, such that data rows sent can be fully decoded using only
+     * the single descriptor.  For data rows that are compositions of multiple
+     * independent messages, this means the descriptor may need to be transformed
+     * to only use nested types:
+     * https://developers.google.com/protocol-buffers/docs/proto#nested
+     *
+     * For additional information on how proto types and values map onto BigQuery
+     * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + getProtoDescriptorFieldBuilder() { + if (protoDescriptorBuilder_ == null) { + protoDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder>( + getProtoDescriptor(), getParentForChildren(), isClean()); + protoDescriptor_ = null; + } + return protoDescriptorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ProtoSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ProtoSchema) + private static final com.google.cloud.bigquery.storage.v1.ProtoSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ProtoSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.ProtoSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ProtoSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java new file mode 100644 index 000000000000..9e3b38259a09 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java @@ -0,0 +1,87 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ProtoSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ProtoSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to only use nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   *
+   * For additional information on how proto types and values map onto BigQuery
+   * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + boolean hasProtoDescriptor(); + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to only use nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   *
+   * For additional information on how proto types and values map onto BigQuery
+   * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor(); + + /** + * + * + *
+   * Descriptor for input message.  The provided descriptor must be
+   * self-contained, such that data rows sent can be fully decoded using only
+   * the single descriptor.  For data rows that are compositions of multiple
+   * independent messages, this means the descriptor may need to be transformed
+   * to only use nested types:
+   * https://developers.google.com/protocol-buffers/docs/proto#nested
+   *
+   * For additional information on how proto types and values map onto BigQuery
+   * see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder getProtoDescriptorOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java new file mode 100644 index 000000000000..a17751510aea --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java @@ -0,0 +1,753 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `ReadRows`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadRowsRequest} + */ +public final class ReadRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ReadRowsRequest) + ReadRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRowsRequest.newBuilder() to construct. + private ReadRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsRequest() { + readStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1.ReadRowsRequest.Builder.class); + } + + public static final int READ_STREAM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object readStream_ = ""; + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The readStream. + */ + @java.lang.Override + public java.lang.String getReadStream() { + java.lang.Object ref = readStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + readStream_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for readStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getReadStreamBytes() { + java.lang.Object ref = readStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + readStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private long offset_ = 0L; + + /** + * + * + *
+   * The offset requested must be less than the last row read from Read.
+   * Requesting a larger offset is undefined. If not specified, start reading
+   * from offset zero.
+   * 
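A minimal sketch of how this offset is used when resuming an interrupted read; the stream name and row counter below are placeholders supplied by the caller, not values from this library.

// Sketch: rebuild a ReadRowsRequest that resumes at the first row not yet
// received. `readStream` comes from a ReadSession; `rowsReceived` is a count
// the caller maintains. Requesting an offset beyond what the stream has
// already delivered is undefined, per the field comment above.
static com.google.cloud.bigquery.storage.v1.ReadRowsRequest resumeAt(
    java.lang.String readStream, long rowsReceived) {
  return com.google.cloud.bigquery.storage.v1.ReadRowsRequest.newBuilder()
      .setReadStream(readStream)
      .setOffset(rowsReceived)
      .build();
}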
+ * + * int64 offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(readStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, readStream_); + } + if (offset_ != 0L) { + output.writeInt64(2, offset_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(readStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, readStream_); + } + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, offset_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ReadRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ReadRowsRequest other = + (com.google.cloud.bigquery.storage.v1.ReadRowsRequest) obj; + + if (!getReadStream().equals(other.getReadStream())) return false; + if (getOffset() != other.getOffset()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + READ_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getReadStream().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ReadRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `ReadRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ReadRowsRequest) + com.google.cloud.bigquery.storage.v1.ReadRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1.ReadRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ReadRowsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readStream_ = ""; + offset_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ReadRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsRequest build() { + com.google.cloud.bigquery.storage.v1.ReadRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.ReadRowsRequest result = + new com.google.cloud.bigquery.storage.v1.ReadRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ReadRowsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readStream_ = readStream_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offset_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder 
addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ReadRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ReadRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ReadRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.ReadRowsRequest.getDefaultInstance()) + return this; + if (!other.getReadStream().isEmpty()) { + readStream_ = other.readStream_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + readStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + offset_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object readStream_ = ""; + + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The readStream. + */ + public java.lang.String getReadStream() { + java.lang.Object ref = readStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + readStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for readStream. + */ + public com.google.protobuf.ByteString getReadStreamBytes() { + java.lang.Object ref = readStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + readStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The readStream to set. + * @return This builder for chaining. + */ + public Builder setReadStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + readStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearReadStream() { + readStream_ = getDefaultInstance().getReadStream(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for readStream to set. + * @return This builder for chaining. + */ + public Builder setReadStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + readStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long offset_; + + /** + * + * + *
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + /** + * + * + *
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * 
+ * + * int64 offset = 2; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + + offset_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * 
+ * + * int64 offset = 2; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ReadRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ReadRowsRequest) + private static final com.google.cloud.bigquery.storage.v1.ReadRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ReadRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java new file mode 100644 index 000000000000..412dbf8cd1df --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java @@ -0,0 +1,71 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ReadRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ReadRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The readStream. + */ + java.lang.String getReadStream(); + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for readStream. + */ + com.google.protobuf.ByteString getReadStreamBytes(); + + /** + * + * + *
+   * The offset requested must be less than the last row read from Read.
+   * Requesting a larger offset is undefined. If not specified, start reading
+   * from offset zero.
+   * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + long getOffset(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java new file mode 100644 index 000000000000..026deff8e313 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java @@ -0,0 +1,2791 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response from calling `ReadRows` may include row data, progress, and
+ * throttling information.
+ * 
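Since row data arrives through a `rows` oneof (Avro or Arrow), a consumer typically dispatches on `getRowsCase()`. A minimal sketch, assuming the usual generated getters for the serialized payloads; `decodeAvro` and `decodeArrow` are hypothetical helpers, not part of this library.

// Sketch: handle one ReadRowsResponse according to its serialization format.
static void handle(com.google.cloud.bigquery.storage.v1.ReadRowsResponse response) {
  switch (response.getRowsCase()) {
    case AVRO_ROWS:
      decodeAvro(response.getAvroRows().getSerializedBinaryRows());
      break;
    case ARROW_RECORD_BATCH:
      decodeArrow(response.getArrowRecordBatch().getSerializedRecordBatch());
      break;
    case ROWS_NOT_SET:
      // A response may carry only stats or throttle state and no row data.
      break;
  }
}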
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadRowsResponse} + */ +public final class ReadRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ReadRowsResponse) + ReadRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRowsResponse.newBuilder() to construct. + private ReadRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1.ReadRowsResponse.Builder.class); + } + + private int bitField0_; + private int rowsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rows_; + + public enum RowsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_ROWS(3), + ARROW_RECORD_BATCH(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 3: + return AVRO_ROWS; + case 4: + return ARROW_RECORD_BATCH; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + private int schemaCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(7), + ARROW_SCHEMA(8), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 7: + return AVRO_SCHEMA; + case 8: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int AVRO_ROWS_FIELD_NUMBER = 3; + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + @java.lang.Override + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRows getAvroRows() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder getAvroRowsOrBuilder() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } + + public static final int ARROW_RECORD_BATCH_FIELD_NUMBER = 4; + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + @java.lang.Override + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getArrowRecordBatch() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 6; + private long rowCount_ = 0L; + + /** + * + * + *
+   * Number of serialized rows in the rows block.
+   * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + public static final int STATS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.StreamStats stats_; + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + @java.lang.Override + public boolean hasStats() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + * + * @return The stats. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats getStats() { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance() + : stats_; + } + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder getStatsOrBuilder() { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance() + : stats_; + } + + public static final int THROTTLE_STATE_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1.ThrottleState throttleState_; + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
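A short sketch of acting on this field; `getThrottlePercent()` is assumed here to be ThrottleState's percentage accessor (it is not defined in this excerpt).

// Sketch: log server-side throttling when a response reports it.
if (response.hasThrottleState()) {
  int throttlePercent = response.getThrottleState().getThrottlePercent();
  if (throttlePercent > 0) {
    System.out.println("ReadRows throttled at " + throttlePercent + "%");
  }
}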
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + @java.lang.Override + public boolean hasThrottleState() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ThrottleState getThrottleState() { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance() + : throttleState_; + } + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder getThrottleStateOrBuilder() { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance() + : throttleState_; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 7; + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema getAvroSchema() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 8; + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getArrowSchema() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + + public static final int UNCOMPRESSED_BYTE_SIZE_FIELD_NUMBER = 9; + private long uncompressedByteSize_ = 0L; + + /** + * + * + *
+   * Optional. If the row data in this ReadRowsResponse is compressed, then
+   * uncompressed byte size is the original size of the uncompressed row data.
+   * If it is set to a value greater than 0, then decompress into a buffer of
+   * size uncompressed_byte_size using the compression codec that was requested
+   * at session creation time, as specified in
+   * TableReadOptions.response_compression_codec in ReadSession.
+   * This value is not set if no response_compression_codec was requested,
+   * and it is -1 if the requested compression would not have reduced the size
+   * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+   * behavior, described at https://github.com/apache/arrow/issues/15102, where
+   * the uncompressed length may be set to -1 to indicate that the data that
+   * follows is not compressed, which can be useful when compression
+   * does not yield appreciable savings. When uncompressed_byte_size is not
+   * greater than 0, the client should skip decompression.
+   * 
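The contract above reduces to one client-side check: decompress only when the value is set and greater than 0. A sketch, with the codec requested at session creation abstracted behind a hypothetical `decompress` helper.

// Sketch: apply the uncompressed_byte_size contract described above.
// `decompress` is a hypothetical helper wrapping the codec chosen via
// TableReadOptions.response_compression_codec; it is not part of this library.
static com.google.protobuf.ByteString rowBytes(
    com.google.cloud.bigquery.storage.v1.ReadRowsResponse response,
    com.google.protobuf.ByteString serialized) {
  if (response.hasUncompressedByteSize() && response.getUncompressedByteSize() > 0) {
    // Compressed: decompress into a buffer of exactly this many bytes.
    return decompress(serialized, (int) response.getUncompressedByteSize());
  }
  // Unset, or -1: the payload is not compressed; use it as-is.
  return serialized;
}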
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the uncompressedByteSize field is set. + */ + @java.lang.Override + public boolean hasUncompressedByteSize() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Optional. If the row data in this ReadRowsResponse is compressed, then
+   * uncompressed byte size is the original size of the uncompressed row data.
+   * If it is set to a value greater than 0, then decompress into a buffer of
+   * size uncompressed_byte_size using the compression codec that was requested
+   * at session creation time, as specified in
+   * TableReadOptions.response_compression_codec in ReadSession.
+   * This value is not set if no response_compression_codec was requested,
+   * and it is -1 if the requested compression would not have reduced the size
+   * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+   * behavior, described at https://github.com/apache/arrow/issues/15102, where
+   * the uncompressed length may be set to -1 to indicate that the data that
+   * follows is not compressed, which can be useful when compression
+   * does not yield appreciable savings. When uncompressed_byte_size is not
+   * greater than 0, the client should skip decompression.
+   * 
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The uncompressedByteSize. + */ + @java.lang.Override + public long getUncompressedByteSize() { + return uncompressedByteSize_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStats()); + } + if (rowsCase_ == 3) { + output.writeMessage(3, (com.google.cloud.bigquery.storage.v1.AvroRows) rows_); + } + if (rowsCase_ == 4) { + output.writeMessage(4, (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getThrottleState()); + } + if (rowCount_ != 0L) { + output.writeInt64(6, rowCount_); + } + if (schemaCase_ == 7) { + output.writeMessage(7, (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + output.writeMessage(8, (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(9, uncompressedByteSize_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStats()); + } + if (rowsCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.cloud.bigquery.storage.v1.AvroRows) rows_); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getThrottleState()); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, rowCount_); + } + if (schemaCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, uncompressedByteSize_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ReadRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ReadRowsResponse other = + (com.google.cloud.bigquery.storage.v1.ReadRowsResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (hasStats() != other.hasStats()) return false; + if (hasStats()) { + if (!getStats().equals(other.getStats())) return false; + } + if (hasThrottleState() != other.hasThrottleState()) return false; + if (hasThrottleState()) { + if 
(!getThrottleState().equals(other.getThrottleState())) return false; + } + if (hasUncompressedByteSize() != other.hasUncompressedByteSize()) return false; + if (hasUncompressedByteSize()) { + if (getUncompressedByteSize() != other.getUncompressedByteSize()) return false; + } + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 3: + if (!getAvroRows().equals(other.getAvroRows())) return false; + break; + case 4: + if (!getArrowRecordBatch().equals(other.getArrowRecordBatch())) return false; + break; + case 0: + default: + } + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 7: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 8: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + if (hasStats()) { + hash = (37 * hash) + STATS_FIELD_NUMBER; + hash = (53 * hash) + getStats().hashCode(); + } + if (hasThrottleState()) { + hash = (37 * hash) + THROTTLE_STATE_FIELD_NUMBER; + hash = (53 * hash) + getThrottleState().hashCode(); + } + if (hasUncompressedByteSize()) { + hash = (37 * hash) + UNCOMPRESSED_BYTE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getUncompressedByteSize()); + } + switch (rowsCase_) { + case 3: + hash = (37 * hash) + AVRO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getAvroRows().hashCode(); + break; + case 4: + hash = (37 * hash) + ARROW_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getArrowRecordBatch().hashCode(); + break; + case 0: + default: + } + switch (schemaCase_) { + case 7: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 8: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.ReadRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response from calling `ReadRows` may include row data, progress, and
+   * throttling information.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ReadRowsResponse) + com.google.cloud.bigquery.storage.v1.ReadRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1.ReadRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ReadRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStatsFieldBuilder(); + getThrottleStateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (avroRowsBuilder_ != null) { + avroRowsBuilder_.clear(); + } + if (arrowRecordBatchBuilder_ != null) { + arrowRecordBatchBuilder_.clear(); + } + rowCount_ = 0L; + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + throttleState_ = null; + if (throttleStateBuilder_ != null) { + throttleStateBuilder_.dispose(); + throttleStateBuilder_ = null; + } + if (avroSchemaBuilder_ != null) { + avroSchemaBuilder_.clear(); + } + if (arrowSchemaBuilder_ != null) { + arrowSchemaBuilder_.clear(); + } + uncompressedByteSize_ = 0L; + rowsCase_ = 0; + rows_ = null; + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ReadRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsResponse build() { + com.google.cloud.bigquery.storage.v1.ReadRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.ReadRowsResponse result = + new com.google.cloud.bigquery.storage.v1.ReadRowsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ReadRowsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + 
result.rowCount_ = rowCount_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.stats_ = statsBuilder_ == null ? stats_ : statsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.throttleState_ = + throttleStateBuilder_ == null ? throttleState_ : throttleStateBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.uncompressedByteSize_ = uncompressedByteSize_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.cloud.bigquery.storage.v1.ReadRowsResponse result) { + result.rowsCase_ = rowsCase_; + result.rows_ = this.rows_; + if (rowsCase_ == 3 && avroRowsBuilder_ != null) { + result.rows_ = avroRowsBuilder_.build(); + } + if (rowsCase_ == 4 && arrowRecordBatchBuilder_ != null) { + result.rows_ = arrowRecordBatchBuilder_.build(); + } + result.schemaCase_ = schemaCase_; + result.schema_ = this.schema_; + if (schemaCase_ == 7 && avroSchemaBuilder_ != null) { + result.schema_ = avroSchemaBuilder_.build(); + } + if (schemaCase_ == 8 && arrowSchemaBuilder_ != null) { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ReadRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ReadRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ReadRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1.ReadRowsResponse.getDefaultInstance()) + return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + if (other.hasStats()) { + mergeStats(other.getStats()); + } + if (other.hasThrottleState()) { + mergeThrottleState(other.getThrottleState()); + } + if (other.hasUncompressedByteSize()) { + setUncompressedByteSize(other.getUncompressedByteSize()); + } + switch (other.getRowsCase()) { + case AVRO_ROWS: + { + mergeAvroRows(other.getAvroRows()); + break; + } + case ARROW_RECORD_BATCH: + { + mergeArrowRecordBatch(other.getArrowRecordBatch()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage(getStatsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 18 + case 26: + { + input.readMessage(getAvroRowsFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + getArrowRecordBatchFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage(getThrottleStateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 48 + case 58: + { + input.readMessage(getAvroSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage(getArrowSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 8; + break; + } // case 66 + case 72: + { + uncompressedByteSize_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 72 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroRows, + com.google.cloud.bigquery.storage.v1.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder> + avroRowsBuilder_; + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + @java.lang.Override + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRows getAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } else { + if (rowsCase_ == 3) { + return avroRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + public Builder setAvroRows(com.google.cloud.bigquery.storage.v1.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + avroRowsBuilder_.setMessage(value); + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + public Builder setAvroRows( + com.google.cloud.bigquery.storage.v1.AvroRows.Builder builderForValue) { + if (avroRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + avroRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + public Builder mergeAvroRows(com.google.cloud.bigquery.storage.v1.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3 + && rows_ != com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1.AvroRows.newBuilder( + (com.google.cloud.bigquery.storage.v1.AvroRows) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 3) { + avroRowsBuilder_.mergeFrom(value); + } else { + avroRowsBuilder_.setMessage(value); + } + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + public Builder clearAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + } + avroRowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + public com.google.cloud.bigquery.storage.v1.AvroRows.Builder getAvroRowsBuilder() { + return getAvroRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder getAvroRowsOrBuilder() { + if ((rowsCase_ == 3) && (avroRowsBuilder_ != null)) { + return avroRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroRows, + com.google.cloud.bigquery.storage.v1.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder> + getAvroRowsFieldBuilder() { + if (avroRowsBuilder_ == null) { + if (!(rowsCase_ == 3)) { + rows_ = com.google.cloud.bigquery.storage.v1.AvroRows.getDefaultInstance(); + } + avroRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroRows, + com.google.cloud.bigquery.storage.v1.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AvroRows) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 3; + onChanged(); + return avroRowsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder> + arrowRecordBatchBuilder_; + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + @java.lang.Override + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return arrowRecordBatchBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder builderForValue) { + if (arrowRecordBatchBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder mergeArrowRecordBatch( + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.newBuilder( + (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + arrowRecordBatchBuilder_.mergeFrom(value); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder clearArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + arrowRecordBatchBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder + getArrowRecordBatchBuilder() { + return getArrowRecordBatchFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if ((rowsCase_ == 4) && (arrowRecordBatchBuilder_ != null)) { + return arrowRecordBatchBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder> + getArrowRecordBatchFieldBuilder() { + if (arrowRecordBatchBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.getDefaultInstance(); + } + arrowRecordBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder>( + (com.google.cloud.bigquery.storage.v1.ArrowRecordBatch) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + return arrowRecordBatchBuilder_; + } + + private long rowCount_; + + /** + * + * + *
+     * Number of serialized rows in the rows block.
+     * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+     * Number of serialized rows in the rows block.
+     * 
+ * + * int64 row_count = 6; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Number of serialized rows in the rows block.
+     * 
+ * + * int64 row_count = 6; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000004); + rowCount_ = 0L; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1.StreamStats stats_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StreamStats, + com.google.cloud.bigquery.storage.v1.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder> + statsBuilder_; + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + public boolean hasStats() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + * + * @return The stats. + */ + public com.google.cloud.bigquery.storage.v1.StreamStats getStats() { + if (statsBuilder_ == null) { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance() + : stats_; + } else { + return statsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + public Builder setStats(com.google.cloud.bigquery.storage.v1.StreamStats value) { + if (statsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stats_ = value; + } else { + statsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + public Builder setStats( + com.google.cloud.bigquery.storage.v1.StreamStats.Builder builderForValue) { + if (statsBuilder_ == null) { + stats_ = builderForValue.build(); + } else { + statsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + public Builder mergeStats(com.google.cloud.bigquery.storage.v1.StreamStats value) { + if (statsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && stats_ != null + && stats_ != com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance()) { + getStatsBuilder().mergeFrom(value); + } else { + stats_ = value; + } + } else { + statsBuilder_.mergeFrom(value); + } + if (stats_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + public Builder clearStats() { + bitField0_ = (bitField0_ & ~0x00000008); + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1.StreamStats.Builder getStatsBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder getStatsOrBuilder() { + if (statsBuilder_ != null) { + return statsBuilder_.getMessageOrBuilder(); + } else { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance() + : stats_; + } + } + + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StreamStats, + com.google.cloud.bigquery.storage.v1.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder> + getStatsFieldBuilder() { + if (statsBuilder_ == null) { + statsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StreamStats, + com.google.cloud.bigquery.storage.v1.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder>( + getStats(), getParentForChildren(), isClean()); + stats_ = null; + } + return statsBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.ThrottleState throttleState_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ThrottleState, + com.google.cloud.bigquery.storage.v1.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder> + throttleStateBuilder_; + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + public boolean hasThrottleState() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + public com.google.cloud.bigquery.storage.v1.ThrottleState getThrottleState() { + if (throttleStateBuilder_ == null) { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance() + : throttleState_; + } else { + return throttleStateBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + public Builder setThrottleState(com.google.cloud.bigquery.storage.v1.ThrottleState value) { + if (throttleStateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + throttleState_ = value; + } else { + throttleStateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + public Builder setThrottleState( + com.google.cloud.bigquery.storage.v1.ThrottleState.Builder builderForValue) { + if (throttleStateBuilder_ == null) { + throttleState_ = builderForValue.build(); + } else { + throttleStateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + public Builder mergeThrottleState(com.google.cloud.bigquery.storage.v1.ThrottleState value) { + if (throttleStateBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && throttleState_ != null + && throttleState_ + != com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance()) { + getThrottleStateBuilder().mergeFrom(value); + } else { + throttleState_ = value; + } + } else { + throttleStateBuilder_.mergeFrom(value); + } + if (throttleState_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + public Builder clearThrottleState() { + bitField0_ = (bitField0_ & ~0x00000010); + throttleState_ = null; + if (throttleStateBuilder_ != null) { + throttleStateBuilder_.dispose(); + throttleStateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1.ThrottleState.Builder getThrottleStateBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getThrottleStateFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder getThrottleStateOrBuilder() { + if (throttleStateBuilder_ != null) { + return throttleStateBuilder_.getMessageOrBuilder(); + } else { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance() + : throttleState_; + } + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ThrottleState, + com.google.cloud.bigquery.storage.v1.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder> + getThrottleStateFieldBuilder() { + if (throttleStateBuilder_ == null) { + throttleStateBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ThrottleState, + com.google.cloud.bigquery.storage.v1.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder>( + getThrottleState(), getParentForChildren(), isClean()); + throttleState_ = null; + } + return throttleStateBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSchema, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder> + avroSchemaBuilder_; + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 7) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema(com.google.cloud.bigquery.storage.v1.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema(com.google.cloud.bigquery.storage.v1.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7 + && schema_ != com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 7) { + avroSchemaBuilder_.mergeFrom(value); + } else { + avroSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.AvroSchema.Builder getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if ((schemaCase_ == 7) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSchema, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 7)) { + schema_ = com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSchema, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 7; + onChanged(); + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 8) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema(com.google.cloud.bigquery.storage.v1.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema(com.google.cloud.bigquery.storage.v1.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8 + && schema_ != com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 8) { + arrowSchemaBuilder_.mergeFrom(value); + } else { + arrowSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if ((schemaCase_ == 8) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 8)) { + schema_ = com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 8; + onChanged(); + return arrowSchemaBuilder_; + } + + private long uncompressedByteSize_; + + /** + * + * + *
+     * Optional. If the row data in this ReadRowsResponse is compressed, then
+     * uncompressed byte size is the original size of the uncompressed row data.
+     * If it is set to a value greater than 0, then decompress into a buffer of
+     * size uncompressed_byte_size using the compression codec that was requested
+     * during session creation time and which is specified in
+     * TableReadOptions.response_compression_codec in ReadSession.
+     * This value is not set if no response_compression_codec was requested
+     * and it is -1 if the requested compression would not have reduced the size
+     * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+     * behavior described here https://github.com/apache/arrow/issues/15102 where
+     * the uncompressed length may be set to -1 to indicate that the data that
+     * follows is not compressed, which can be useful for cases where compression
+     * does not yield appreciable savings. When uncompressed_byte_size is not
+     * greater than 0, the client should skip decompression.
+     * 
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the uncompressedByteSize field is set. + */ + @java.lang.Override + public boolean hasUncompressedByteSize() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. If the row data in this ReadRowsResponse is compressed, then
+     * uncompressed byte size is the original size of the uncompressed row data.
+     * If it is set to a value greater than 0, then decompress into a buffer of
+     * size uncompressed_byte_size using the compression codec that was requested
+     * during session creation time and which is specified in
+     * TableReadOptions.response_compression_codec in ReadSession.
+     * This value is not set if no response_compression_codec was requested
+     * and it is -1 if the requested compression would not have reduced the size
+     * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+     * behavior described here https://github.com/apache/arrow/issues/15102 where
+     * the uncompressed length may be set to -1 to indicate that the data that
+     * follows is not compressed, which can be useful for cases where compression
+     * does not yield appreciable savings. When uncompressed_byte_size is not
+     * greater than 0, the client should skip decompression.
+     * 
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The uncompressedByteSize. + */ + @java.lang.Override + public long getUncompressedByteSize() { + return uncompressedByteSize_; + } + + /** + * + * + *
+     * Optional. If the row data in this ReadRowsResponse is compressed, then
+     * uncompressed byte size is the original size of the uncompressed row data.
+     * If it is set to a value greater than 0, then decompress into a buffer of
+     * size uncompressed_byte_size using the compression codec that was requested
+     * during session creation time and which is specified in
+     * TableReadOptions.response_compression_codec in ReadSession.
+     * This value is not set if no response_compression_codec was requested
+     * and it is -1 if the requested compression would not have reduced the size
+     * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+     * behavior described here https://github.com/apache/arrow/issues/15102 where
+     * the uncompressed length may be set to -1 to indicate that the data that
+     * follows is not compressed, which can be useful for cases where compression
+     * does not yield appreciable savings. When uncompressed_byte_size is not
+     * greater than 0, the client should skip decompression.
+     * 
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The uncompressedByteSize to set. + * @return This builder for chaining. + */ + public Builder setUncompressedByteSize(long value) { + + uncompressedByteSize_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. If the row data in this ReadRowsResponse is compressed, then
+     * uncompressed byte size is the original size of the uncompressed row data.
+     * If it is set to a value greater than 0, then decompress into a buffer of
+     * size uncompressed_byte_size using the compression codec that was requested
+     * during session creation time and which is specified in
+     * TableReadOptions.response_compression_codec in ReadSession.
+     * This value is not set if no response_compression_codec was requested
+     * and it is -1 if the requested compression would not have reduced the size
+     * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+     * behavior described here https://github.com/apache/arrow/issues/15102 where
+     * the uncompressed length may be set to -1 to indicate that the data that
+     * follows is not compressed, which can be useful for cases where compression
+     * does not yield appreciable savings. When uncompressed_byte_size is not
+     * greater than 0, the client should skip decompression.
+     * 
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearUncompressedByteSize() { + bitField0_ = (bitField0_ & ~0x00000080); + uncompressedByteSize_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ReadRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ReadRowsResponse) + private static final com.google.cloud.bigquery.storage.v1.ReadRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ReadRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.ReadRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java new file mode 100644 index 000000000000..e23f9a111df7 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java @@ -0,0 +1,334 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ReadRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ReadRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + boolean hasAvroRows(); + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + com.google.cloud.bigquery.storage.v1.AvroRows getAvroRows(); + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.AvroRows avro_rows = 3; + */ + com.google.cloud.bigquery.storage.v1.AvroRowsOrBuilder getAvroRowsOrBuilder(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + boolean hasArrowRecordBatch(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + com.google.cloud.bigquery.storage.v1.ArrowRecordBatch getArrowRecordBatch(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
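+   *
+   * A hedged usage sketch, not part of the generated API: consumers typically
+   * switch on the rows oneof to pick the populated variant. The handler names
+   * below are illustrative.
+   * 
+   * switch (response.getRowsCase()) {
+   *   case AVRO_ROWS:
+   *     handleAvro(response.getAvroRows());          // hypothetical handler
+   *     break;
+   *   case ARROW_RECORD_BATCH:
+   *     handleArrow(response.getArrowRecordBatch()); // hypothetical handler
+   *     break;
+   *   case ROWS_NOT_SET:
+   *     break;
+   * }
+   * 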
+ * + * .google.cloud.bigquery.storage.v1.ArrowRecordBatch arrow_record_batch = 4; + */ + com.google.cloud.bigquery.storage.v1.ArrowRecordBatchOrBuilder getArrowRecordBatchOrBuilder(); + + /** + * + * + *
+   * Number of serialized rows in the rows block.
+   * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + long getRowCount(); + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + boolean hasStats(); + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + * + * @return The stats. + */ + com.google.cloud.bigquery.storage.v1.StreamStats getStats(); + + /** + * + * + *
+   * Statistics for the stream.
+   * 
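+   *
+   * A hedged progress sketch, assuming the StreamStats.Progress accessors
+   * defined elsewhere in this package:
+   * 
+   * if (response.hasStats()) {
+   *   // fraction of the stream consumed as of the end of this response
+   *   double fractionDone = response.getStats().getProgress().getAtResponseEnd();
+   * }
+   * 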
+ * + * .google.cloud.bigquery.storage.v1.StreamStats stats = 2; + */ + com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
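+   *
+   * A hedged sketch: because an unset field means the status is unchanged,
+   * only update cached state when the field is present. The cache variable is
+   * illustrative.
+   * 
+   * if (response.hasThrottleState()) {
+   *   lastThrottlePercent = response.getThrottleState().getThrottlePercent(); // hypothetical cache
+   * }
+   * 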
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + boolean hasThrottleState(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + com.google.cloud.bigquery.storage.v1.ThrottleState getThrottleState(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ThrottleState throttle_state = 5; + */ + com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder getThrottleStateOrBuilder(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1.AvroSchema getAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder getAvroSchemaOrBuilder(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1.ArrowSchema getArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
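+   *
+   * A hedged sketch: the schema oneof identifies which schema variant, if
+   * any, accompanies this response; the Arrow variant carries an
+   * IPC-serialized schema.
+   * 
+   * if (response.hasArrowSchema()) {
+   *   com.google.protobuf.ByteString ipcSchema =
+   *       response.getArrowSchema().getSerializedSchema();
+   *   // feed ipcSchema to an Arrow reader of your choice
+   * }
+   * 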
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getArrowSchemaOrBuilder(); + + /** + * + * + *
+   * Optional. If the row data in this ReadRowsResponse is compressed, then
+   * uncompressed byte size is the original size of the uncompressed row data.
+   * If it is set to a value greater than 0, then decompress into a buffer of
+   * size uncompressed_byte_size using the compression codec that was requested
+   * during session creation time and which is specified in
+   * TableReadOptions.response_compression_codec in ReadSession.
+   * This value is not set if no response_compression_codec was requested
+   * and it is -1 if the requested compression would not have reduced the size
+   * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+   * behavior described here https://github.com/apache/arrow/issues/15102 where
+   * the uncompressed length may be set to -1 to indicate that the data that
+   * follows is not compressed, which can be useful for cases where compression
+   * does not yield appreciable savings. When uncompressed_byte_size is not
+   * greater than 0, the client should skip decompression.
+   * 
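+   *
+   * A hedged decompression sketch following the rule above; decompress() and
+   * compressedRows are illustrative, not part of this API:
+   * 
+   * long size = response.getUncompressedByteSize();
+   * if (response.hasUncompressedByteSize() && size > 0) {
+   *   byte[] out = new byte[(int) size];
+   *   // codec comes from TableReadOptions.response_compression_codec
+   *   decompress(compressedRows, out); // hypothetical helper
+   * } else {
+   *   // unset or -1: payload is not compressed; skip decompression
+   * }
+   * 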
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the uncompressedByteSize field is set. + */ + boolean hasUncompressedByteSize(); + + /** + * + * + *
+   * Optional. If the row data in this ReadRowsResponse is compressed, then
+   * uncompressed byte size is the original size of the uncompressed row data.
+   * If it is set to a value greater than 0, then decompress into a buffer of
+   * size uncompressed_byte_size using the compression codec that was requested
+   * during session creation time and which is specified in
+   * TableReadOptions.response_compression_codec in ReadSession.
+   * This value is not set if no response_compression_codec was requested
+   * and it is -1 if the requested compression would not have reduced the size
+   * of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+   * behavior described here https://github.com/apache/arrow/issues/15102 where
+   * the uncompressed length may be set to -1 to indicate that the data that
+   * follows is not compressed, which can be useful for cases where compression
+   * does not yield appreciable savings. When uncompressed_byte_size is not
+   * greater than 0, the client should skip decompression.
+   * 
+ * + * optional int64 uncompressed_byte_size = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The uncompressedByteSize. + */ + long getUncompressedByteSize(); + + com.google.cloud.bigquery.storage.v1.ReadRowsResponse.RowsCase getRowsCase(); + + com.google.cloud.bigquery.storage.v1.ReadRowsResponse.SchemaCase getSchemaCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java new file mode 100644 index 000000000000..059114b948d3 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -0,0 +1,8179 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Information about the ReadSession.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadSession} + */ +public final class ReadSession extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ReadSession) + ReadSessionOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadSession.newBuilder() to construct. + private ReadSession(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadSession() { + name_ = ""; + dataFormat_ = 0; + table_ = ""; + streams_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadSession(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadSession.class, + com.google.cloud.bigquery.storage.v1.ReadSession.Builder.class); + } + + public interface TableModifiersOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + boolean hasSnapshotTime(); + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + com.google.protobuf.Timestamp getSnapshotTime(); + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder(); + } + + /** + * + * + *
+   * Additional attributes when reading a table.
+   * 
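+   *
+   * A hedged construction sketch: pin a read to a point-in-time snapshot by
+   * setting snapshot_time on the modifiers. The snapshotEpochSeconds variable
+   * is illustrative.
+   * 
+   * ReadSession.TableModifiers modifiers =
+   *     ReadSession.TableModifiers.newBuilder()
+   *         .setSnapshotTime(
+   *             com.google.protobuf.Timestamp.newBuilder()
+   *                 .setSeconds(snapshotEpochSeconds) // hypothetical value
+   *                 .build())
+   *         .build();
+   * 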
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadSession.TableModifiers} + */ + public static final class TableModifiers extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) + TableModifiersOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableModifiers.newBuilder() to construct. + private TableModifiers(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableModifiers() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableModifiers(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.class, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder.class); + } + + private int bitField0_; + public static final int SNAPSHOT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp snapshotTime_; + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + @java.lang.Override + public boolean hasSnapshotTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getSnapshotTime() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getSnapshotTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSnapshotTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers other = + (com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) obj; + + if (hasSnapshotTime() != other.hasSnapshotTime()) return false; + if (hasSnapshotTime()) { + if (!getSnapshotTime().equals(other.getSnapshotTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSnapshotTime()) { + hash = (37 * hash) + SNAPSHOT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Additional attributes when reading a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadSession.TableModifiers} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.class, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSnapshotTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + snapshotTime_ = null; + if (snapshotTimeBuilder_ != null) { + snapshotTimeBuilder_.dispose(); + snapshotTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers build() { + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers buildPartial() { + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers result = + new com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.snapshotTime_ = + snapshotTimeBuilder_ == null ? 
snapshotTime_ : snapshotTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers other) { + if (other + == com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.getDefaultInstance()) + return this; + if (other.hasSnapshotTime()) { + mergeSnapshotTime(other.getSnapshotTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getSnapshotTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp snapshotTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + snapshotTimeBuilder_; + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + public boolean hasSnapshotTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + public com.google.protobuf.Timestamp getSnapshotTime() { + if (snapshotTimeBuilder_ == null) { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } else { + return snapshotTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshotTime_ = value; + } else { + snapshotTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (snapshotTimeBuilder_ == null) { + snapshotTime_ = builderForValue.build(); + } else { + snapshotTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder mergeSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && snapshotTime_ != null + && snapshotTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getSnapshotTimeBuilder().mergeFrom(value); + } else { + snapshotTime_ = value; + } + } else { + snapshotTimeBuilder_.mergeFrom(value); + } + if (snapshotTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder clearSnapshotTime() { + bitField0_ = (bitField0_ & ~0x00000001); + snapshotTime_ = null; + if (snapshotTimeBuilder_ != null) { + snapshotTimeBuilder_.dispose(); + snapshotTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getSnapshotTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSnapshotTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + if (snapshotTimeBuilder_ != null) { + return snapshotTimeBuilder_.getMessageOrBuilder(); + } else { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getSnapshotTimeFieldBuilder() { + if (snapshotTimeBuilder_ == null) { + snapshotTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSnapshotTime(), getParentForChildren(), isClean()); + snapshotTime_ = null; + } + return snapshotTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ReadSession.TableModifiers) + private static final com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers(); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableModifiers parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TableReadOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
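+     *
+     * <p>Illustrative sketch using the example schema above: select only the
+     * nested {@code string_field1} column.
+     *
+     * <pre>
+     * ReadSession.TableReadOptions options =
+     *     ReadSession.TableReadOptions.newBuilder()
+     *         .addSelectedFields("struct_field.string_field1")
+     *         .build();
+     * </pre>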
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + java.util.List getSelectedFieldsList(); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + int getSelectedFieldsCount(); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + java.lang.String getSelectedFields(int index); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + com.google.protobuf.ByteString getSelectedFieldsBytes(int index); + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
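+     *
+     * <p>Illustrative sketch (the column name is an assumption taken from the
+     * examples above):
+     *
+     * <pre>
+     * ReadSession.TableReadOptions options =
+     *     ReadSession.TableReadOptions.newBuilder()
+     *         .setRowRestriction("date_field = CAST('2014-9-27' as DATE)")
+     *         .build();
+     * </pre>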
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + java.lang.String getRowRestriction(); + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + com.google.protobuf.ByteString getRowRestrictionBytes(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
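+     *
+     * <p>Illustrative sketch: attach Arrow-specific serialization options when
+     * requesting Arrow output (the default instance is used here purely as a
+     * placeholder).
+     *
+     * <pre>
+     * ReadSession.TableReadOptions options =
+     *     ReadSession.TableReadOptions.newBuilder()
+     *         .setArrowSerializationOptions(
+     *             ArrowSerializationOptions.getDefaultInstance())
+     *         .build();
+     * </pre>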
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the arrowSerializationOptions field is set. + */ + boolean hasArrowSerializationOptions(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The arrowSerializationOptions. + */ + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions getArrowSerializationOptions(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder + getArrowSerializationOptionsOrBuilder(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Avro output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the avroSerializationOptions field is set. + */ + boolean hasAvroSerializationOptions(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Avro output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The avroSerializationOptions. + */ + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions getAvroSerializationOptions(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Avro output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder + getAvroSerializationOptionsOrBuilder(); + + /** + * + * + *
+     * Optional. Specifies a table sampling percentage. Specifically, the query
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
+     * 
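+     *
+     * <p>Illustrative sketch: sample roughly ten percent of the table's data
+     * blocks.
+     *
+     * <pre>
+     * ReadSession.TableReadOptions options =
+     *     ReadSession.TableReadOptions.newBuilder()
+     *         .setSamplePercentage(10.0)
+     *         .build();
+     * </pre>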
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the samplePercentage field is set. + */ + boolean hasSamplePercentage(); + + /** + * + * + *
+     * Optional. Specifies a table sampling percentage. Specifically, the query
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
+     * 
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The samplePercentage. + */ + double getSamplePercentage(); + + /** + * + * + *
+     * Optional. Set response_compression_codec when creating a read session to
+     * enable application-level compression of ReadRows responses.
+     * 
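+     *
+     * <p>Illustrative sketch: opt into LZ4-compressed ReadRows responses.
+     *
+     * <pre>
+     * ReadSession.TableReadOptions options =
+     *     ReadSession.TableReadOptions.newBuilder()
+     *         .setResponseCompressionCodec(
+     *             ReadSession.TableReadOptions.ResponseCompressionCodec
+     *                 .RESPONSE_COMPRESSION_CODEC_LZ4)
+     *         .build();
+     * </pre>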
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the responseCompressionCodec field is set. + */ + boolean hasResponseCompressionCodec(); + + /** + * + * + *
+     * Optional. Set response_compression_codec when creating a read session to
+     * enable application-level compression of ReadRows responses.
+     * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for responseCompressionCodec. + */ + int getResponseCompressionCodecValue(); + + /** + * + * + *
+     * Optional. Set response_compression_codec when creating a read session to
+     * enable application-level compression of ReadRows responses.
+     * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The responseCompressionCodec. + */ + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec + getResponseCompressionCodec(); + + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .OutputFormatSerializationOptionsCase + getOutputFormatSerializationOptionsCase(); + } + + /** + * + * + *
+   * Options dictating how we read a table.
+   * 
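+   *
+   * <p>A combined illustrative sketch (field and column names are assumptions
+   * reused from the field-level examples below): restrict both the columns and
+   * the rows a read session returns.
+   *
+   * <pre>
+   * ReadSession.TableReadOptions options =
+   *     ReadSession.TableReadOptions.newBuilder()
+   *         .addSelectedFields("struct_field")
+   *         .setRowRestriction("int_field > 5")
+   *         .build();
+   * </pre>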
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions} + */ + public static final class TableReadOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) + TableReadOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableReadOptions.newBuilder() to construct. + private TableReadOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableReadOptions() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + rowRestriction_ = ""; + responseCompressionCodec_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableReadOptions(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder.class); + } + + /** + * + * + *
+     * Specifies which compression codec to attempt on the entire serialized
+     * response payload (either Arrow record batch or Avro rows). This is
+     * not to be confused with the Apache Arrow native compression codecs
+     * specified in ArrowSerializationOptions. For performance reasons, when
+     * creating a read session requesting Arrow responses, setting both native
+     * Arrow compression and application-level response compression will not be
+     * allowed - choose, at most, one kind of compression.
+     * 
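+     *
+     * <p>Illustrative sketch: the generated {@code forNumber} lookup maps wire
+     * values back to enum constants, returning {@code null} for unknown
+     * numbers.
+     *
+     * <pre>
+     * ResponseCompressionCodec codec = ResponseCompressionCodec.forNumber(2);
+     * // codec == RESPONSE_COMPRESSION_CODEC_LZ4
+     * </pre>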
+ * + * Protobuf enum {@code + * google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec} + */ + public enum ResponseCompressionCodec implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * Default is no compression.
+       * 
+ * + * RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0; + */ + RESPONSE_COMPRESSION_CODEC_UNSPECIFIED(0), + /** + * + * + *
+       * Use raw LZ4 compression.
+       * 
+ * + * RESPONSE_COMPRESSION_CODEC_LZ4 = 2; + */ + RESPONSE_COMPRESSION_CODEC_LZ4(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+       * Default is no compression.
+       * 
+ * + * RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0; + */ + public static final int RESPONSE_COMPRESSION_CODEC_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+       * Use raw LZ4 compression.
+       * 
+ * + * RESPONSE_COMPRESSION_CODEC_LZ4 = 2; + */ + public static final int RESPONSE_COMPRESSION_CODEC_LZ4_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ResponseCompressionCodec valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ResponseCompressionCodec forNumber(int value) { + switch (value) { + case 0: + return RESPONSE_COMPRESSION_CODEC_UNSPECIFIED; + case 2: + return RESPONSE_COMPRESSION_CODEC_LZ4; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ResponseCompressionCodec findValueByNumber(int number) { + return ResponseCompressionCodec.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final ResponseCompressionCodec[] VALUES = values(); + + public static ResponseCompressionCodec valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ResponseCompressionCodec(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec) + } + + private int bitField0_; + private int outputFormatSerializationOptionsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object outputFormatSerializationOptions_; + + public enum OutputFormatSerializationOptionsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + ARROW_SERIALIZATION_OPTIONS(3), + AVRO_SERIALIZATION_OPTIONS(4), + OUTPUTFORMATSERIALIZATIONOPTIONS_NOT_SET(0); + private final int value; + + private OutputFormatSerializationOptionsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OutputFormatSerializationOptionsCase valueOf(int value) { + return forNumber(value); + } + + public static OutputFormatSerializationOptionsCase forNumber(int value) { + switch (value) { + case 3: + return ARROW_SERIALIZATION_OPTIONS; + case 4: + return AVRO_SERIALIZATION_OPTIONS; + case 0: + return OUTPUTFORMATSERIALIZATIONOPTIONS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public OutputFormatSerializationOptionsCase getOutputFormatSerializationOptionsCase() { + return OutputFormatSerializationOptionsCase.forNumber(outputFormatSerializationOptionsCase_); + } + + public static final int SELECTED_FIELDS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList selectedFields_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + return selectedFields_; + } + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + public static final int ROW_RESTRICTION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object rowRestriction_ = ""; + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + @java.lang.Override + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } + } + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARROW_SERIALIZATION_OPTIONS_FIELD_NUMBER = 3; + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the arrowSerializationOptions field is set. + */ + @java.lang.Override + public boolean hasArrowSerializationOptions() { + return outputFormatSerializationOptionsCase_ == 3; + } + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The arrowSerializationOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + getArrowSerializationOptions() { + if (outputFormatSerializationOptionsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDefaultInstance(); + } + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder + getArrowSerializationOptionsOrBuilder() { + if (outputFormatSerializationOptionsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDefaultInstance(); + } + + public static final int AVRO_SERIALIZATION_OPTIONS_FIELD_NUMBER = 4; + + /** + * + * + *
+     * Optional. Options specific to the Apache Avro output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the avroSerializationOptions field is set. + */ + @java.lang.Override + public boolean hasAvroSerializationOptions() { + return outputFormatSerializationOptionsCase_ == 4; + } + + /** + * + * + *
+     * Optional. Options specific to the Apache Avro output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The avroSerializationOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions + getAvroSerializationOptions() { + if (outputFormatSerializationOptionsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } + + /** + * + * + *
+     * Optional. Options specific to the Apache Avro output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder + getAvroSerializationOptionsOrBuilder() { + if (outputFormatSerializationOptionsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } + + public static final int SAMPLE_PERCENTAGE_FIELD_NUMBER = 5; + private double samplePercentage_ = 0D; + + /** + * + * + *
+     * Optional. Specifies a table sampling percentage. Specifically, the query
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
+     * 
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the samplePercentage field is set. + */ + @java.lang.Override + public boolean hasSamplePercentage() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Specifies a table sampling percentage. Specifically, the query
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
+     * 
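+     *
+     * Illustrative sketch (editorial addition, not generated code): sampling
+     * roughly 10 percent of data blocks via the builder.
+     * <pre>{@code
+     * TableReadOptions options =
+     *     TableReadOptions.newBuilder().setSamplePercentage(10.0).build();
+     * }</pre>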
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The samplePercentage. + */ + @java.lang.Override + public double getSamplePercentage() { + return samplePercentage_; + } + + public static final int RESPONSE_COMPRESSION_CODEC_FIELD_NUMBER = 6; + private int responseCompressionCodec_ = 0; + + /** + * + * + *
+     * Optional. Set response_compression_codec when creating a read session to
+     * enable application-level compression of ReadRows responses.
+     * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the responseCompressionCodec field is set. + */ + @java.lang.Override + public boolean hasResponseCompressionCodec() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. Set response_compression_codec when creating a read session to
+     * enable application-level compression of ReadRows responses.
+     * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for responseCompressionCodec. + */ + @java.lang.Override + public int getResponseCompressionCodecValue() { + return responseCompressionCodec_; + } + + /** + * + * + *
+     * Optional. Set response_compression_codec when creating a read session to
+     * enable application-level compression of ReadRows responses.
+     * 
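+     *
+     * Illustrative sketch (editorial; RESPONSE_COMPRESSION_CODEC_LZ4 is assumed
+     * to be the non-default value of this enum):
+     * <pre>{@code
+     * TableReadOptions options = TableReadOptions.newBuilder()
+     *     .setResponseCompressionCodec(
+     *         TableReadOptions.ResponseCompressionCodec.RESPONSE_COMPRESSION_CODEC_LZ4)
+     *     .build();
+     * }</pre>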
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The responseCompressionCodec. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .ResponseCompressionCodec + getResponseCompressionCodec() { + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec + result = + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .ResponseCompressionCodec.forNumber(responseCompressionCodec_); + return result == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .ResponseCompressionCodec.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < selectedFields_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, selectedFields_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(rowRestriction_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, rowRestriction_); + } + if (outputFormatSerializationOptionsCase_ == 3) { + output.writeMessage( + 3, + (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_); + } + if (outputFormatSerializationOptionsCase_ == 4) { + output.writeMessage( + 4, + (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeDouble(5, samplePercentage_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeEnum(6, responseCompressionCodec_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < selectedFields_.size(); i++) { + dataSize += computeStringSizeNoTag(selectedFields_.getRaw(i)); + } + size += dataSize; + size += 1 * getSelectedFieldsList().size(); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(rowRestriction_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, rowRestriction_); + } + if (outputFormatSerializationOptionsCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, + (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_); + } + if (outputFormatSerializationOptionsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, + (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(5, samplePercentage_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(6, responseCompressionCodec_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if 
(obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions other = + (com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) obj; + + if (!getSelectedFieldsList().equals(other.getSelectedFieldsList())) return false; + if (!getRowRestriction().equals(other.getRowRestriction())) return false; + if (hasSamplePercentage() != other.hasSamplePercentage()) return false; + if (hasSamplePercentage()) { + if (java.lang.Double.doubleToLongBits(getSamplePercentage()) + != java.lang.Double.doubleToLongBits(other.getSamplePercentage())) return false; + } + if (hasResponseCompressionCodec() != other.hasResponseCompressionCodec()) return false; + if (hasResponseCompressionCodec()) { + if (responseCompressionCodec_ != other.responseCompressionCodec_) return false; + } + if (!getOutputFormatSerializationOptionsCase() + .equals(other.getOutputFormatSerializationOptionsCase())) return false; + switch (outputFormatSerializationOptionsCase_) { + case 3: + if (!getArrowSerializationOptions().equals(other.getArrowSerializationOptions())) + return false; + break; + case 4: + if (!getAvroSerializationOptions().equals(other.getAvroSerializationOptions())) + return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSelectedFieldsCount() > 0) { + hash = (37 * hash) + SELECTED_FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getSelectedFieldsList().hashCode(); + } + hash = (37 * hash) + ROW_RESTRICTION_FIELD_NUMBER; + hash = (53 * hash) + getRowRestriction().hashCode(); + if (hasSamplePercentage()) { + hash = (37 * hash) + SAMPLE_PERCENTAGE_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getSamplePercentage())); + } + if (hasResponseCompressionCodec()) { + hash = (37 * hash) + RESPONSE_COMPRESSION_CODEC_FIELD_NUMBER; + hash = (53 * hash) + responseCompressionCodec_; + } + switch (outputFormatSerializationOptionsCase_) { + case 3: + hash = (37 * hash) + ARROW_SERIALIZATION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getArrowSerializationOptions().hashCode(); + break; + case 4: + hash = (37 * hash) + AVRO_SERIALIZATION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getAvroSerializationOptions().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Options dictating how we read a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + rowRestriction_ = ""; + if (arrowSerializationOptionsBuilder_ != null) { + arrowSerializationOptionsBuilder_.clear(); + } + if (avroSerializationOptionsBuilder_ != null) { + avroSerializationOptionsBuilder_.clear(); + } + samplePercentage_ = 0D; + responseCompressionCodec_ = 0; + outputFormatSerializationOptionsCase_ = 0; + outputFormatSerializationOptions_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions build() { + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions buildPartial() { + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions result = + new com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + selectedFields_.makeImmutable(); + result.selectedFields_ = selectedFields_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowRestriction_ = rowRestriction_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + 
result.samplePercentage_ = samplePercentage_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.responseCompressionCodec_ = responseCompressionCodec_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions result) { + result.outputFormatSerializationOptionsCase_ = outputFormatSerializationOptionsCase_; + result.outputFormatSerializationOptions_ = this.outputFormatSerializationOptions_; + if (outputFormatSerializationOptionsCase_ == 3 + && arrowSerializationOptionsBuilder_ != null) { + result.outputFormatSerializationOptions_ = arrowSerializationOptionsBuilder_.build(); + } + if (outputFormatSerializationOptionsCase_ == 4 + && avroSerializationOptionsBuilder_ != null) { + result.outputFormatSerializationOptions_ = avroSerializationOptionsBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .getDefaultInstance()) return this; + if (!other.selectedFields_.isEmpty()) { + if (selectedFields_.isEmpty()) { + selectedFields_ = other.selectedFields_; + bitField0_ |= 0x00000001; + } else { + ensureSelectedFieldsIsMutable(); + selectedFields_.addAll(other.selectedFields_); + } + onChanged(); + } + if (!other.getRowRestriction().isEmpty()) { + rowRestriction_ = other.rowRestriction_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasSamplePercentage()) { + setSamplePercentage(other.getSamplePercentage()); + } + if (other.hasResponseCompressionCodec()) { + setResponseCompressionCodec(other.getResponseCompressionCodec()); + } + switch (other.getOutputFormatSerializationOptionsCase()) { + case ARROW_SERIALIZATION_OPTIONS: + { + mergeArrowSerializationOptions(other.getArrowSerializationOptions()); + break; + } + case AVRO_SERIALIZATION_OPTIONS: + { + mergeAvroSerializationOptions(other.getAvroSerializationOptions()); + break; + } + case OUTPUTFORMATSERIALIZATIONOPTIONS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + 
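+      // Editorial sketch (comments only, so the generated API is unchanged):
+      // building the message documented above and round-tripping it through the
+      // wire format implemented by writeTo/parseFrom.
+      //
+      //   TableReadOptions options = TableReadOptions.newBuilder()
+      //       .addSelectedFields("struct_field.string_field1")
+      //       .setRowRestriction("int_field > 5")
+      //       .setSamplePercentage(10.0)
+      //       .build();
+      //   byte[] bytes = options.toByteArray();
+      //   TableReadOptions parsed = TableReadOptions.parseFrom(bytes);
+      //   // equals/hashCode are value-based, so the round trip is lossless:
+      //   assert parsed.equals(options) && parsed.hashCode() == options.hashCode();
+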
@java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(s); + break; + } // case 10 + case 18: + { + rowRestriction_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + getArrowSerializationOptionsFieldBuilder().getBuilder(), extensionRegistry); + outputFormatSerializationOptionsCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + getAvroSerializationOptionsFieldBuilder().getBuilder(), extensionRegistry); + outputFormatSerializationOptionsCase_ = 4; + break; + } // case 34 + case 41: + { + samplePercentage_ = input.readDouble(); + bitField0_ |= 0x00000010; + break; + } // case 41 + case 48: + { + responseCompressionCodec_ = input.readEnum(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int outputFormatSerializationOptionsCase_ = 0; + private java.lang.Object outputFormatSerializationOptions_; + + public OutputFormatSerializationOptionsCase getOutputFormatSerializationOptionsCase() { + return OutputFormatSerializationOptionsCase.forNumber( + outputFormatSerializationOptionsCase_); + } + + public Builder clearOutputFormatSerializationOptions() { + outputFormatSerializationOptionsCase_ = 0; + outputFormatSerializationOptions_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList selectedFields_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSelectedFieldsIsMutable() { + if (!selectedFields_.isModifiable()) { + selectedFields_ = new com.google.protobuf.LazyStringArrayList(selectedFields_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
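+       *
+       * Illustrative sketch (editorial): selecting only the nested leaf from the
+       * example schema above.
+       * <pre>{@code
+       * TableReadOptions options = TableReadOptions.newBuilder()
+       *     .addSelectedFields("struct_field.string_field1")
+       *     .build();
+       * }</pre>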
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + selectedFields_.makeImmutable(); + return selectedFields_; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index to set the value at. + * @param value The selectedFields to set. + * @return This builder for chaining. + */ + public Builder setSelectedFields(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFields(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param values The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addAllSelectedFields(java.lang.Iterable values) { + ensureSelectedFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, selectedFields_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return This builder for chaining. + */ + public Builder clearSelectedFields() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The bytes of the selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object rowRestriction_ = ""; + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
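+       *
+       * Illustrative sketch (editorial), reusing one of the example predicates
+       * above:
+       * <pre>{@code
+       * TableReadOptions options = TableReadOptions.newBuilder()
+       *     .setRowRestriction("date_field = CAST('2014-9-27' as DATE)")
+       *     .build();
+       * }</pre>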
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @param value The rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestriction(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + rowRestriction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return This builder for chaining. + */ + public Builder clearRowRestriction() { + rowRestriction_ = getDefaultInstance().getRowRestriction(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @param value The bytes for rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestrictionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + rowRestriction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder> + arrowSerializationOptionsBuilder_; + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the arrowSerializationOptions field is set. + */ + @java.lang.Override + public boolean hasArrowSerializationOptions() { + return outputFormatSerializationOptionsCase_ == 3; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The arrowSerializationOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + getArrowSerializationOptions() { + if (arrowSerializationOptionsBuilder_ == null) { + if (outputFormatSerializationOptionsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + .getDefaultInstance(); + } else { + if (outputFormatSerializationOptionsCase_ == 3) { + return arrowSerializationOptionsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setArrowSerializationOptions( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions value) { + if (arrowSerializationOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + outputFormatSerializationOptions_ = value; + onChanged(); + } else { + arrowSerializationOptionsBuilder_.setMessage(value); + } + outputFormatSerializationOptionsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setArrowSerializationOptions( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder builderForValue) { + if (arrowSerializationOptionsBuilder_ == null) { + outputFormatSerializationOptions_ = builderForValue.build(); + onChanged(); + } else { + arrowSerializationOptionsBuilder_.setMessage(builderForValue.build()); + } + outputFormatSerializationOptionsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeArrowSerializationOptions( + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions value) { + if (arrowSerializationOptionsBuilder_ == null) { + if (outputFormatSerializationOptionsCase_ == 3 + && outputFormatSerializationOptions_ + != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + .getDefaultInstance()) { + outputFormatSerializationOptions_ = + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.newBuilder( + (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_) + .mergeFrom(value) + .buildPartial(); + } else { + outputFormatSerializationOptions_ = value; + } + onChanged(); + } else { + if (outputFormatSerializationOptionsCase_ == 3) { + arrowSerializationOptionsBuilder_.mergeFrom(value); + } else { + arrowSerializationOptionsBuilder_.setMessage(value); + } + } + outputFormatSerializationOptionsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearArrowSerializationOptions() { + if (arrowSerializationOptionsBuilder_ == null) { + if (outputFormatSerializationOptionsCase_ == 3) { + outputFormatSerializationOptionsCase_ = 0; + outputFormatSerializationOptions_ = null; + onChanged(); + } + } else { + if (outputFormatSerializationOptionsCase_ == 3) { + outputFormatSerializationOptionsCase_ = 0; + outputFormatSerializationOptions_ = null; + } + arrowSerializationOptionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder + getArrowSerializationOptionsBuilder() { + return getArrowSerializationOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder + getArrowSerializationOptionsOrBuilder() { + if ((outputFormatSerializationOptionsCase_ == 3) + && (arrowSerializationOptionsBuilder_ != null)) { + return arrowSerializationOptionsBuilder_.getMessageOrBuilder(); + } else { + if (outputFormatSerializationOptionsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder> + getArrowSerializationOptionsFieldBuilder() { + if (arrowSerializationOptionsBuilder_ == null) { + if (!(outputFormatSerializationOptionsCase_ == 3)) { + outputFormatSerializationOptions_ = + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.getDefaultInstance(); + } + arrowSerializationOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsOrBuilder>( + (com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) + outputFormatSerializationOptions_, + getParentForChildren(), + isClean()); + outputFormatSerializationOptions_ = null; + } + outputFormatSerializationOptionsCase_ = 3; + onChanged(); + return arrowSerializationOptionsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder> + avroSerializationOptionsBuilder_; + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the avroSerializationOptions field is set. + */ + @java.lang.Override + public boolean hasAvroSerializationOptions() { + return outputFormatSerializationOptionsCase_ == 4; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The avroSerializationOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions + getAvroSerializationOptions() { + if (avroSerializationOptionsBuilder_ == null) { + if (outputFormatSerializationOptionsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } else { + if (outputFormatSerializationOptionsCase_ == 4) { + return avroSerializationOptionsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
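+       *
+       * This field shares the output_format_serialization_options oneof with
+       * arrow_serialization_options, so setting one clears the other. Editorial
+       * sketch using default instances as placeholders:
+       * <pre>{@code
+       * TableReadOptions.Builder builder = TableReadOptions.newBuilder()
+       *     .setArrowSerializationOptions(ArrowSerializationOptions.getDefaultInstance())
+       *     .setAvroSerializationOptions(AvroSerializationOptions.getDefaultInstance());
+       * // builder.hasArrowSerializationOptions() is now false;
+       * // builder.hasAvroSerializationOptions() is true.
+       * }</pre>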
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAvroSerializationOptions( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions value) { + if (avroSerializationOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + outputFormatSerializationOptions_ = value; + onChanged(); + } else { + avroSerializationOptionsBuilder_.setMessage(value); + } + outputFormatSerializationOptionsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAvroSerializationOptions( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder builderForValue) { + if (avroSerializationOptionsBuilder_ == null) { + outputFormatSerializationOptions_ = builderForValue.build(); + onChanged(); + } else { + avroSerializationOptionsBuilder_.setMessage(builderForValue.build()); + } + outputFormatSerializationOptionsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAvroSerializationOptions( + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions value) { + if (avroSerializationOptionsBuilder_ == null) { + if (outputFormatSerializationOptionsCase_ == 4 + && outputFormatSerializationOptions_ + != com.google.cloud.bigquery.storage.v1.AvroSerializationOptions + .getDefaultInstance()) { + outputFormatSerializationOptions_ = + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.newBuilder( + (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_) + .mergeFrom(value) + .buildPartial(); + } else { + outputFormatSerializationOptions_ = value; + } + onChanged(); + } else { + if (outputFormatSerializationOptionsCase_ == 4) { + avroSerializationOptionsBuilder_.mergeFrom(value); + } else { + avroSerializationOptionsBuilder_.setMessage(value); + } + } + outputFormatSerializationOptionsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAvroSerializationOptions() { + if (avroSerializationOptionsBuilder_ == null) { + if (outputFormatSerializationOptionsCase_ == 4) { + outputFormatSerializationOptionsCase_ = 0; + outputFormatSerializationOptions_ = null; + onChanged(); + } + } else { + if (outputFormatSerializationOptionsCase_ == 4) { + outputFormatSerializationOptionsCase_ = 0; + outputFormatSerializationOptions_ = null; + } + avroSerializationOptionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder + getAvroSerializationOptionsBuilder() { + return getAvroSerializationOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder + getAvroSerializationOptionsOrBuilder() { + if ((outputFormatSerializationOptionsCase_ == 4) + && (avroSerializationOptionsBuilder_ != null)) { + return avroSerializationOptionsBuilder_.getMessageOrBuilder(); + } else { + if (outputFormatSerializationOptionsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_; + } + return com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Avro output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSerializationOptions avro_serialization_options = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder> + getAvroSerializationOptionsFieldBuilder() { + if (avroSerializationOptionsBuilder_ == null) { + if (!(outputFormatSerializationOptionsCase_ == 4)) { + outputFormatSerializationOptions_ = + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.getDefaultInstance(); + } + avroSerializationOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1.AvroSerializationOptionsOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) + outputFormatSerializationOptions_, + getParentForChildren(), + isClean()); + outputFormatSerializationOptions_ = null; + } + outputFormatSerializationOptionsCase_ = 4; + onChanged(); + return avroSerializationOptionsBuilder_; + } + + private double samplePercentage_; + + /** + * + * + *
+       * Optional. Specifies a table sampling percentage. Specifically, the query
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
+       * 
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the samplePercentage field is set. + */ + @java.lang.Override + public boolean hasSamplePercentage() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+       * Optional. Specifies a table sampling percentage. Specifically, the query
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
+       * 
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The samplePercentage. + */ + @java.lang.Override + public double getSamplePercentage() { + return samplePercentage_; + } + + /** + * + * + *
+       * Optional. Specifies a table sampling percentage. Specifically, the query
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
+       * 
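+       *
+       * A minimal usage sketch (illustrative only, not part of the generated
+       * documentation): sampling roughly ten percent of data blocks with the
+       * setter declared above.
+       * <pre>{@code
+       * ReadSession.TableReadOptions options =
+       *     ReadSession.TableReadOptions.newBuilder()
+       *         .setSamplePercentage(10.0)
+       *         .build();
+       * }</pre>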
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The samplePercentage to set. + * @return This builder for chaining. + */ + public Builder setSamplePercentage(double value) { + + samplePercentage_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Specifies a table sampling percentage. Specifically, the query
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
+       * 
+ * + * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearSamplePercentage() { + bitField0_ = (bitField0_ & ~0x00000010); + samplePercentage_ = 0D; + onChanged(); + return this; + } + + private int responseCompressionCodec_ = 0; + + /** + * + * + *
+       * Optional. Set response_compression_codec when creating a read session to
+       * enable application-level compression of ReadRows responses.
+       * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the responseCompressionCodec field is set. + */ + @java.lang.Override + public boolean hasResponseCompressionCodec() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+       * Optional. Set response_compression_codec when creating a read session to
+       * enable application-level compression of ReadRows responses.
+       * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for responseCompressionCodec. + */ + @java.lang.Override + public int getResponseCompressionCodecValue() { + return responseCompressionCodec_; + } + + /** + * + * + *
+       * Optional. Set response_compression_codec when creating a read session to
+       * enable application-level compression of ReadRows responses.
+       * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for responseCompressionCodec to set. + * @return This builder for chaining. + */ + public Builder setResponseCompressionCodecValue(int value) { + responseCompressionCodec_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Set response_compression_codec when creating a read session to
+       * enable application-level compression of ReadRows responses.
+       * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The responseCompressionCodec. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .ResponseCompressionCodec + getResponseCompressionCodec() { + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec + result = + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .ResponseCompressionCodec.forNumber(responseCompressionCodec_); + return result == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .ResponseCompressionCodec.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * Optional. Set response_compression_codec when creating a read session to
+       * enable application-level compression of ReadRows responses.
+       * 
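+       *
+       * A minimal usage sketch (illustrative only; it assumes the enum
+       * declares an LZ4 codec value):
+       * <pre>{@code
+       * ReadSession.TableReadOptions options =
+       *     ReadSession.TableReadOptions.newBuilder()
+       *         .setResponseCompressionCodec(
+       *             ReadSession.TableReadOptions.ResponseCompressionCodec
+       *                 .RESPONSE_COMPRESSION_CODEC_LZ4)
+       *         .build();
+       * }</pre>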
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The responseCompressionCodec to set. + * @return This builder for chaining. + */ + public Builder setResponseCompressionCodec( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec + value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + responseCompressionCodec_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Set response_compression_codec when creating a read session to
+       * enable application-level compression of ReadRows responses.
+       * 
+ * + * + * optional .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec response_compression_codec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearResponseCompressionCodec() { + bitField0_ = (bitField0_ & ~0x00000020); + responseCompressionCodec_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions) + private static final com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions(); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableReadOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int schemaCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(4), + ARROW_SCHEMA(5), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 4: + return AVRO_SCHEMA; + case 5: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
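+   *
+   * A minimal sketch (illustrative only; it assumes protobuf-java-util is on
+   * the classpath and {@code session} is a previously created ReadSession):
+   * <pre>{@code
+   * boolean expired = session.hasExpireTime()
+   *     && com.google.protobuf.util.Timestamps.compare(
+   *             session.getExpireTime(),
+   *             com.google.protobuf.util.Timestamps.fromMillis(
+   *                 System.currentTimeMillis()))
+   *         <= 0;
+   * }</pre>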
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int DATA_FORMAT_FIELD_NUMBER = 3; + private int dataFormat_ = 0; + + /** + * + * + *
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not
+   * supported.
+   * 
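+   *
+   * A minimal sketch (illustrative only; the table name is a placeholder):
+   * requesting Arrow output on the session passed to CreateReadSession.
+   * <pre>{@code
+   * ReadSession sessionToCreate =
+   *     ReadSession.newBuilder()
+   *         .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+   *         .setDataFormat(DataFormat.ARROW)
+   *         .build();
+   * }</pre>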
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + @java.lang.Override + public int getDataFormatValue() { + return dataFormat_; + } + + /** + * + * + *
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not
+   * supported.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.DataFormat getDataFormat() { + com.google.cloud.bigquery.storage.v1.DataFormat result = + com.google.cloud.bigquery.storage.v1.DataFormat.forNumber(dataFormat_); + return result == null ? com.google.cloud.bigquery.storage.v1.DataFormat.UNRECOGNIZED : result; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 4; + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 4; + } + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema getAvroSchema() { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 5; + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
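+   *
+   * Because avro_schema and arrow_schema form a oneof, exactly one of them is
+   * populated, matching the requested data format. A minimal dispatch sketch
+   * (illustrative only; the handler methods are hypothetical):
+   * <pre>{@code
+   * switch (session.getSchemaCase()) {
+   *   case AVRO_SCHEMA:
+   *     handleAvro(session.getAvroSchema());
+   *     break;
+   *   case ARROW_SCHEMA:
+   *     handleArrow(session.getArrowSchema());
+   *     break;
+   *   default:
+   *     break;
+   * }
+   * }</pre>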
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 5; + } + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getArrowSchema() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + + public static final int TABLE_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+   * 
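+   *
+   * A minimal sketch (illustrative only; the identifiers are placeholders):
+   * composing the expected resource name.
+   * <pre>{@code
+   * String table = String.format(
+   *     "projects/%s/datasets/%s/tables/%s",
+   *     "my-project", "my_dataset", "my_table");
+   * }</pre>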
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+   * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TABLE_MODIFIERS_FIELD_NUMBER = 7; + private com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers tableModifiers_; + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + @java.lang.Override + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers getTableModifiers() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } + + public static final int READ_OPTIONS_FIELD_NUMBER = 8; + private com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions readOptions_; + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
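+   *
+   * A minimal sketch (illustrative only; it assumes the column-selection and
+   * row-restriction fields declared on {@code TableReadOptions}):
+   * <pre>{@code
+   * ReadSession.TableReadOptions options =
+   *     ReadSession.TableReadOptions.newBuilder()
+   *         .addSelectedFields("name")
+   *         .setRowRestriction("state = \"WA\"")
+   *         .build();
+   * }</pre>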
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + @java.lang.Override + public boolean hasReadOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions getReadOptions() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.getDefaultInstance() + : readOptions_; + } + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.getDefaultInstance() + : readOptions_; + } + + public static final int STREAMS_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private java.util.List streams_; + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated,
+   * in which case the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
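+   *
+   * A minimal sketch (illustrative only; {@code session} is a created
+   * ReadSession): each stream name feeds a subsequent ReadRows call.
+   * <pre>{@code
+   * for (ReadStream stream : session.getStreamsList()) {
+   *   String streamName = stream.getName();
+   *   // pass streamName to a ReadRowsRequest (sketch; not shown here)
+   * }
+   * }</pre>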
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getStreamsList() { + return streams_; + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated,
+   * in which case the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getStreamsOrBuilderList() { + return streams_; + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated,
+   * in which case the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getStreamsCount() { + return streams_.size(); + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated,
+   * in which case the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream getStreams(int index) { + return streams_.get(index); + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated,
+   * in which case the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getStreamsOrBuilder(int index) { + return streams_.get(index); + } + + public static final int ESTIMATED_TOTAL_BYTES_SCANNED_FIELD_NUMBER = 12; + private long estimatedTotalBytesScanned_ = 0L; + + /** + * + * + *
+   * Output only. An estimate of the number of bytes this session will scan when
+   * all streams are completely consumed. This estimate is based on
+   * metadata from the table, which might be incomplete or stale.
+   * 
+ * + * int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalBytesScanned. + */ + @java.lang.Override + public long getEstimatedTotalBytesScanned() { + return estimatedTotalBytesScanned_; + } + + public static final int ESTIMATED_TOTAL_PHYSICAL_FILE_SIZE_FIELD_NUMBER = 15; + private long estimatedTotalPhysicalFileSize_ = 0L; + + /** + * + * + *
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table.  This field is only set for
+   * BigLake tables.
+   * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalPhysicalFileSize. + */ + @java.lang.Override + public long getEstimatedTotalPhysicalFileSize() { + return estimatedTotalPhysicalFileSize_; + } + + public static final int ESTIMATED_ROW_COUNT_FIELD_NUMBER = 14; + private long estimatedRowCount_ = 0L; + + /** + * + * + *
+   * Output only. An estimate of the number of rows present in this session's
+   * streams. This estimate is based on metadata from the table, which might be
+   * incomplete or stale.
+   * 
+ * + * int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The estimatedRowCount. + */ + @java.lang.Override + public long getEstimatedRowCount() { + return estimatedRowCount_; + } + + public static final int TRACE_ID_FIELD_NUMBER = 13; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. ID set by the client to annotate a session identity. This does
+   * not need to be strictly unique; rather, the same ID should be used to
+   * group logically connected sessions (e.g. using the same ID for all
+   * sessions needed to complete a Spark SQL query is reasonable).
+   *
+   * Maximum length is 256 bytes.
+   * 
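+   *
+   * A minimal sketch (illustrative only; the ID is a placeholder): reusing
+   * one trace ID across all sessions of a single job.
+   * <pre>{@code
+   * ReadSession sessionToCreate =
+   *     ReadSession.newBuilder()
+   *         .setTraceId("spark-sql-job-42")
+   *         .build();
+   * }</pre>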
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. ID set by the client to annotate a session identity. This does
+   * not need to be strictly unique; rather, the same ID should be used to
+   * group logically connected sessions (e.g. using the same ID for all
+   * sessions needed to complete a Spark SQL query is reasonable).
+   *
+   * Maximum length is 256 bytes.
+   * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getExpireTime()); + } + if (dataFormat_ + != com.google.cloud.bigquery.storage.v1.DataFormat.DATA_FORMAT_UNSPECIFIED.getNumber()) { + output.writeEnum(3, dataFormat_); + } + if (schemaCase_ == 4) { + output.writeMessage(4, (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_); + } + if (schemaCase_ == 5) { + output.writeMessage(5, (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, table_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(7, getTableModifiers()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(8, getReadOptions()); + } + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(10, streams_.get(i)); + } + if (estimatedTotalBytesScanned_ != 0L) { + output.writeInt64(12, estimatedTotalBytesScanned_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 13, traceId_); + } + if (estimatedRowCount_ != 0L) { + output.writeInt64(14, estimatedRowCount_); + } + if (estimatedTotalPhysicalFileSize_ != 0L) { + output.writeInt64(15, estimatedTotalPhysicalFileSize_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getExpireTime()); + } + if (dataFormat_ + != com.google.cloud.bigquery.storage.v1.DataFormat.DATA_FORMAT_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, dataFormat_); + } + if (schemaCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_); + } + if (schemaCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, table_); + } + if (((bitField0_ & 0x00000002) != 0)) { + 
size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getTableModifiers()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getReadOptions()); + } + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, streams_.get(i)); + } + if (estimatedTotalBytesScanned_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(12, estimatedTotalBytesScanned_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, traceId_); + } + if (estimatedRowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(14, estimatedRowCount_); + } + if (estimatedTotalPhysicalFileSize_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 15, estimatedTotalPhysicalFileSize_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ReadSession)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ReadSession other = + (com.google.cloud.bigquery.storage.v1.ReadSession) obj; + + if (!getName().equals(other.getName())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (dataFormat_ != other.dataFormat_) return false; + if (!getTable().equals(other.getTable())) return false; + if (hasTableModifiers() != other.hasTableModifiers()) return false; + if (hasTableModifiers()) { + if (!getTableModifiers().equals(other.getTableModifiers())) return false; + } + if (hasReadOptions() != other.hasReadOptions()) return false; + if (hasReadOptions()) { + if (!getReadOptions().equals(other.getReadOptions())) return false; + } + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (getEstimatedTotalBytesScanned() != other.getEstimatedTotalBytesScanned()) return false; + if (getEstimatedTotalPhysicalFileSize() != other.getEstimatedTotalPhysicalFileSize()) + return false; + if (getEstimatedRowCount() != other.getEstimatedRowCount()) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 4: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 5: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (37 * hash) + DATA_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + dataFormat_; + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasTableModifiers()) { + hash = (37 * hash) + TABLE_MODIFIERS_FIELD_NUMBER; + hash = (53 * hash) + 
getTableModifiers().hashCode(); + } + if (hasReadOptions()) { + hash = (37 * hash) + READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReadOptions().hashCode(); + } + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + hash = (37 * hash) + ESTIMATED_TOTAL_BYTES_SCANNED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedTotalBytesScanned()); + hash = (37 * hash) + ESTIMATED_TOTAL_PHYSICAL_FILE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedTotalPhysicalFileSize()); + hash = (37 * hash) + ESTIMATED_ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedRowCount()); + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + switch (schemaCase_) { + case 4: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 5: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.cloud.bigquery.storage.v1.ReadSession parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ReadSession prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about the ReadSession.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadSession} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ReadSession) + com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadSession.class, + com.google.cloud.bigquery.storage.v1.ReadSession.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ReadSession.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getExpireTimeFieldBuilder(); + getTableModifiersFieldBuilder(); + getReadOptionsFieldBuilder(); + getStreamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + dataFormat_ = 0; + if (avroSchemaBuilder_ != null) { + avroSchemaBuilder_.clear(); + } + if (arrowSchemaBuilder_ != null) { + arrowSchemaBuilder_.clear(); + } + table_ = ""; + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + readOptions_ = null; + if (readOptionsBuilder_ != null) { + readOptionsBuilder_.dispose(); + readOptionsBuilder_ = null; + } + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + } else { + streams_ = null; + streamsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + estimatedTotalBytesScanned_ = 0L; + estimatedTotalPhysicalFileSize_ = 0L; + estimatedRowCount_ = 0L; + traceId_ = ""; + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession build() { + com.google.cloud.bigquery.storage.v1.ReadSession result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession buildPartial() { + com.google.cloud.bigquery.storage.v1.ReadSession result = + new com.google.cloud.bigquery.storage.v1.ReadSession(this); + buildPartialRepeatedFields(result); + if 
(bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1.ReadSession result) { + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000100); + } + result.streams_ = streams_; + } else { + result.streams_ = streamsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ReadSession result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.dataFormat_ = dataFormat_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.tableModifiers_ = + tableModifiersBuilder_ == null ? tableModifiers_ : tableModifiersBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.readOptions_ = + readOptionsBuilder_ == null ? readOptions_ : readOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.estimatedTotalBytesScanned_ = estimatedTotalBytesScanned_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.estimatedTotalPhysicalFileSize_ = estimatedTotalPhysicalFileSize_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.estimatedRowCount_ = estimatedRowCount_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.traceId_ = traceId_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.cloud.bigquery.storage.v1.ReadSession result) { + result.schemaCase_ = schemaCase_; + result.schema_ = this.schema_; + if (schemaCase_ == 4 && avroSchemaBuilder_ != null) { + result.schema_ = avroSchemaBuilder_.build(); + } + if (schemaCase_ == 5 && arrowSchemaBuilder_ != null) { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ReadSession) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ReadSession) other); + } else { + super.mergeFrom(other); + return this; + } + } + + 
public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ReadSession other) { + if (other == com.google.cloud.bigquery.storage.v1.ReadSession.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (other.dataFormat_ != 0) { + setDataFormatValue(other.getDataFormatValue()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.hasTableModifiers()) { + mergeTableModifiers(other.getTableModifiers()); + } + if (other.hasReadOptions()) { + mergeReadOptions(other.getReadOptions()); + } + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000100); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000100); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + if (other.getEstimatedTotalBytesScanned() != 0L) { + setEstimatedTotalBytesScanned(other.getEstimatedTotalBytesScanned()); + } + if (other.getEstimatedTotalPhysicalFileSize() != 0L) { + setEstimatedTotalPhysicalFileSize(other.getEstimatedTotalPhysicalFileSize()); + } + if (other.getEstimatedRowCount() != 0L) { + setEstimatedRowCount(other.getEstimatedRowCount()); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00001000; + onChanged(); + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + dataFormat_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + input.readMessage(getAvroSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage(getArrowSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 5; + break; + } // case 42 + case 50: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage(getTableModifiersFieldBuilder().getBuilder(), extensionRegistry); 
+ bitField0_ |= 0x00000040; + break; + } // case 58 + case 66: + { + input.readMessage(getReadOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 82: + { + com.google.cloud.bigquery.storage.v1.ReadStream m = + input.readMessage( + com.google.cloud.bigquery.storage.v1.ReadStream.parser(), + extensionRegistry); + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(m); + } else { + streamsBuilder_.addMessage(m); + } + break; + } // case 82 + case 96: + { + estimatedTotalBytesScanned_ = input.readInt64(); + bitField0_ |= 0x00000200; + break; + } // case 96 + case 106: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00001000; + break; + } // case 106 + case 112: + { + estimatedRowCount_ = input.readInt64(); + bitField0_ |= 0x00000800; + break; + } // case 112 + case 120: + { + estimatedTotalPhysicalFileSize_ = input.readInt64(); + bitField0_ |= 0x00000400; + break; + } // case 120 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
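+     *
+     * <p>Illustrative sketch only (not part of the generated docs): {@code session}
+     * below is a hypothetical, already-created ReadSession.
+     *
+     * <pre>{@code
+     * // The server assigns the name; clients only read it back.
+     * String sessionName = session.getName();
+     * }</pre>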
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
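+     *
+     * <p>Illustrative sketch only; assumes {@code protobuf-java-util} on the classpath
+     * and a hypothetical, already-created {@code session}.
+     *
+     * <pre>{@code
+     * if (session.hasExpireTime()) {
+     *   long expireMillis =
+     *       com.google.protobuf.util.Timestamps.toMillis(session.getExpireTime());
+     *   boolean expired = expireMillis <= System.currentTimeMillis();
+     * }
+     * }</pre>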
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000002); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private int dataFormat_ = 0; + + /** + * + * + *
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+     * supported.
+     * 
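+     *
+     * <p>Illustrative sketch only: requesting Arrow output when building a session.
+     *
+     * <pre>{@code
+     * com.google.cloud.bigquery.storage.v1.ReadSession.Builder builder =
+     *     com.google.cloud.bigquery.storage.v1.ReadSession.newBuilder()
+     *         .setDataFormat(com.google.cloud.bigquery.storage.v1.DataFormat.ARROW);
+     * }</pre>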
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + @java.lang.Override + public int getDataFormatValue() { + return dataFormat_; + } + + /** + * + * + *
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+     * supported.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for dataFormat to set. + * @return This builder for chaining. + */ + public Builder setDataFormatValue(int value) { + dataFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+     * supported.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.DataFormat getDataFormat() { + com.google.cloud.bigquery.storage.v1.DataFormat result = + com.google.cloud.bigquery.storage.v1.DataFormat.forNumber(dataFormat_); + return result == null ? com.google.cloud.bigquery.storage.v1.DataFormat.UNRECOGNIZED : result; + } + + /** + * + * + *
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+     * supported.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The dataFormat to set. + * @return This builder for chaining. + */ + public Builder setDataFormat(com.google.cloud.bigquery.storage.v1.DataFormat value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + dataFormat_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not
+     * supported.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearDataFormat() { + bitField0_ = (bitField0_ & ~0x00000004); + dataFormat_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSchema, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder> + avroSchemaBuilder_; + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
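+     *
+     * <p>Illustrative sketch only: the schema is a oneof, so dispatch on the case of a
+     * hypothetical {@code session} before reading either variant.
+     *
+     * <pre>{@code
+     * switch (session.getSchemaCase()) {
+     *   case AVRO_SCHEMA:
+     *     String avroJson = session.getAvroSchema().getSchema();
+     *     break;
+     *   case ARROW_SCHEMA:
+     *     com.google.protobuf.ByteString arrowBytes =
+     *         session.getArrowSchema().getSerializedSchema();
+     *     break;
+     *   default:
+     *     break;
+     * }
+     * }</pre>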
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 4; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 4) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema(com.google.cloud.bigquery.storage.v1.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 4; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 4; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema(com.google.cloud.bigquery.storage.v1.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4 + && schema_ != com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 4) { + avroSchemaBuilder_.mergeFrom(value); + } else { + avroSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 4; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 4) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.AvroSchema.Builder getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if ((schemaCase_ == 4) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSchema, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 4)) { + schema_ = com.google.cloud.bigquery.storage.v1.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.AvroSchema, + com.google.cloud.bigquery.storage.v1.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 4; + onChanged(); + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 5; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 5) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema(com.google.cloud.bigquery.storage.v1.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema(com.google.cloud.bigquery.storage.v1.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5 + && schema_ != com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 5) { + arrowSchemaBuilder_.mergeFrom(value); + } else { + arrowSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if ((schemaCase_ == 5) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 5)) { + schema_ = com.google.cloud.bigquery.storage.v1.ArrowSchema.getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ArrowSchema, + com.google.cloud.bigquery.storage.v1.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 5; + onChanged(); + return arrowSchemaBuilder_; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers tableModifiers_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder> + tableModifiersBuilder_; + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
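+     *
+     * <p>Illustrative sketch only; {@code snapshotEpochSeconds} is a hypothetical value.
+     *
+     * <pre>{@code
+     * com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers modifiers =
+     *     com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.newBuilder()
+     *         .setSnapshotTime(
+     *             com.google.protobuf.Timestamp.newBuilder().setSeconds(snapshotEpochSeconds))
+     *         .build();
+     * }</pre>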
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers getTableModifiers() { + if (tableModifiersBuilder_ == null) { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } else { + return tableModifiersBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableModifiers_ = value; + } else { + tableModifiersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder builderForValue) { + if (tableModifiersBuilder_ == null) { + tableModifiers_ = builderForValue.build(); + } else { + tableModifiersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeTableModifiers( + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && tableModifiers_ != null + && tableModifiers_ + != com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers + .getDefaultInstance()) { + getTableModifiersBuilder().mergeFrom(value); + } else { + tableModifiers_ = value; + } + } else { + tableModifiersBuilder_.mergeFrom(value); + } + if (tableModifiers_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearTableModifiers() { + bitField0_ = (bitField0_ & ~0x00000040); + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder + getTableModifiersBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getTableModifiersFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + if (tableModifiersBuilder_ != null) { + return tableModifiersBuilder_.getMessageOrBuilder(); + } else { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder> + getTableModifiersFieldBuilder() { + if (tableModifiersBuilder_ == null) { + tableModifiersBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder>( + getTableModifiers(), getParentForChildren(), isClean()); + tableModifiers_ = null; + } + return tableModifiersBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions readOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder> + readOptionsBuilder_; + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
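+     *
+     * <p>Illustrative sketch only; the column names and filter are hypothetical.
+     *
+     * <pre>{@code
+     * com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions options =
+     *     com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.newBuilder()
+     *         .addSelectedFields("word")
+     *         .addSelectedFields("word_count")
+     *         .setRowRestriction("word_count > 10")
+     *         .build();
+     * }</pre>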
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + public boolean hasReadOptions() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions getReadOptions() { + if (readOptionsBuilder_ == null) { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.getDefaultInstance() + : readOptions_; + } else { + return readOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readOptions_ = value; + } else { + readOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder builderForValue) { + if (readOptionsBuilder_ == null) { + readOptions_ = builderForValue.build(); + } else { + readOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeReadOptions( + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && readOptions_ != null + && readOptions_ + != com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions + .getDefaultInstance()) { + getReadOptionsBuilder().mergeFrom(value); + } else { + readOptions_ = value; + } + } else { + readOptionsBuilder_.mergeFrom(value); + } + if (readOptions_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearReadOptions() { + bitField0_ = (bitField0_ & ~0x00000080); + readOptions_ = null; + if (readOptionsBuilder_ != null) { + readOptionsBuilder_.dispose(); + readOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder + getReadOptionsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return getReadOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + if (readOptionsBuilder_ != null) { + return readOptionsBuilder_.getMessageOrBuilder(); + } else { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.getDefaultInstance() + : readOptions_; + } + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder> + getReadOptionsFieldBuilder() { + if (readOptionsBuilder_ == null) { + readOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder>( + getReadOptions(), getParentForChildren(), isClean()); + readOptions_ = null; + } + return readOptionsBuilder_; + } + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000100) != 0)) { + streams_ = + new java.util.ArrayList(streams_); + bitField0_ |= 0x00000100; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder> + streamsBuilder_; + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
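+     *
+     * <p>Illustrative sketch only: iterating the streams of a hypothetical
+     * {@code session}; each stream name is what a ReadRows call would consume.
+     *
+     * <pre>{@code
+     * for (com.google.cloud.bigquery.storage.v1.ReadStream stream : session.getStreamsList()) {
+     *   String streamName = stream.getName();
+     * }
+     * }</pre>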
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadStream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams(int index, com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams(int index, com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllStreams( + java.lang.Iterable values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadStream.Builder getStreamsBuilder(int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getStreamsOrBuilder(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadStream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.ReadStream.Builder addStreamsBuilder(int index) { + return getStreamsFieldBuilder() + .addBuilder(index, com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder>( + streams_, ((bitField0_ & 0x00000100) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + private long estimatedTotalBytesScanned_; + + /** + * + * + *
+     * Output only. An estimate of the number of bytes this session will scan when
+     * all streams are completely consumed. This estimate is based on
+     * metadata from the table which might be incomplete or stale.
+     * 
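+     *
+     * <p>Illustrative sketch only: treat the value from a hypothetical {@code session}
+     * as a hint, since the backing metadata may be incomplete or stale.
+     *
+     * <pre>{@code
+     * long approxBytes = session.getEstimatedTotalBytesScanned();
+     * System.out.printf("expecting to scan ~%d bytes%n", approxBytes);
+     * }</pre>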
+ * + * int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalBytesScanned. + */ + @java.lang.Override + public long getEstimatedTotalBytesScanned() { + return estimatedTotalBytesScanned_; + } + + /** + * + * + *
+     * Output only. An estimate of the number of bytes this session will scan when
+     * all streams are completely consumed. This estimate is based on
+     * metadata from the table which might be incomplete or stale.
+     * 
+ * + * int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The estimatedTotalBytesScanned to set. + * @return This builder for chaining. + */ + public Builder setEstimatedTotalBytesScanned(long value) { + + estimatedTotalBytesScanned_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. An estimate of the number of bytes this session will scan when
+     * all streams are completely consumed. This estimate is based on
+     * metadata from the table which might be incomplete or stale.
+     * 
+ * + * int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearEstimatedTotalBytesScanned() { + bitField0_ = (bitField0_ & ~0x00000200); + estimatedTotalBytesScanned_ = 0L; + onChanged(); + return this; + } + + private long estimatedTotalPhysicalFileSize_; + + /** + * + * + *
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
+     * BigLake tables.
+     * 
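+     *
+     * <p>Illustrative sketch only: the field stays at zero outside BigLake tables, so
+     * guard on a nonzero value ({@code session} is hypothetical).
+     *
+     * <pre>{@code
+     * long physicalBytes = session.getEstimatedTotalPhysicalFileSize();
+     * if (physicalBytes > 0) {
+     *   // BigLake table: pre-projection file-size estimate is available.
+     * }
+     * }</pre>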
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalPhysicalFileSize. + */ + @java.lang.Override + public long getEstimatedTotalPhysicalFileSize() { + return estimatedTotalPhysicalFileSize_; + } + + /** + * + * + *
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
+     * BigLake tables.
+     * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The estimatedTotalPhysicalFileSize to set. + * @return This builder for chaining. + */ + public Builder setEstimatedTotalPhysicalFileSize(long value) { + + estimatedTotalPhysicalFileSize_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
+     * BigLake tables.
+     * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearEstimatedTotalPhysicalFileSize() { + bitField0_ = (bitField0_ & ~0x00000400); + estimatedTotalPhysicalFileSize_ = 0L; + onChanged(); + return this; + } + + private long estimatedRowCount_; + + /** + * + * + *
+     * Output only. An estimate of the number of rows present in this session's
+     * streams. This estimate is based on table metadata, which might be
+     * incomplete or stale.
+     * 
+ * + * int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The estimatedRowCount. + */ + @java.lang.Override + public long getEstimatedRowCount() { + return estimatedRowCount_; + } + + /** + * + * + *
+     * Output only. An estimate of the number of rows present in this session's
+     * streams. This estimate is based on table metadata, which might be
+     * incomplete or stale.
+     * 
+ * + * int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The estimatedRowCount to set. + * @return This builder for chaining. + */ + public Builder setEstimatedRowCount(long value) { + + estimatedRowCount_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. An estimate of the number of rows present in this session's
+     * streams. This estimate is based on table metadata, which might be
+     * incomplete or stale.
+     * 
+ * + * int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearEstimatedRowCount() { + bitField0_ = (bitField0_ & ~0x00000800); + estimatedRowCount_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. ID set by the client to annotate a session identity. This does
+     * not need to be strictly unique; rather, the same ID should be used to
+     * group logically connected sessions (e.g., use the same ID for all the
+     * sessions needed to complete a Spark SQL query).
+     *
+     * Maximum length is 256 bytes.
+     * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. ID set by the client to annotate a session identity. This does
+     * not need to be strictly unique; rather, the same ID should be used to
+     * group logically connected sessions (e.g., use the same ID for all the
+     * sessions needed to complete a Spark SQL query).
+     *
+     * Maximum length is 256 bytes.
+     * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. ID set by the client to annotate a session identity. This does
+     * not need to be strictly unique; rather, the same ID should be used to
+     * group logically connected sessions (e.g., use the same ID for all the
+     * sessions needed to complete a Spark SQL query).
+     *
+     * Maximum length is 256 bytes.
+     * 
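+     *
+     * <p>A minimal sketch of tagging related sessions with one ID (the trace ID
+     * value below is hypothetical):
+     * <pre>{@code
+     * ReadSession session =
+     *     ReadSession.newBuilder().setTraceId("spark-sql-query-1234").build();
+     * }</pre>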
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. ID set by the client to annotate a session identity. This does
+     * not need to be strictly unique; rather, the same ID should be used to
+     * group logically connected sessions (e.g., use the same ID for all the
+     * sessions needed to complete a Spark SQL query).
+     *
+     * Maximum length is 256 bytes.
+     * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00001000); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. ID set by the client to annotate a session identity. This does
+     * not need to be strictly unique; rather, the same ID should be used to
+     * group logically connected sessions (e.g., use the same ID for all the
+     * sessions needed to complete a Spark SQL query).
+     *
+     * Maximum length is 256 bytes.
+     * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ReadSession) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ReadSession) + private static final com.google.cloud.bigquery.storage.v1.ReadSession DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ReadSession(); + } + + public static com.google.cloud.bigquery.storage.v1.ReadSession getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadSession parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadSession getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java new file mode 100644 index 000000000000..8c90e760a1b1 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -0,0 +1,523 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ReadSessionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ReadSession) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
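+   *
+   * <p>Sketch: before reusing a cached session, a caller can compare the
+   * expiration against the current time (Timestamps comes from the optional
+   * protobuf-java-util artifact and is an assumption here):
+   * <pre>{@code
+   * boolean expired =
+   *     session.hasExpireTime()
+   *         && com.google.protobuf.util.Timestamps.toMillis(session.getExpireTime())
+   *             < System.currentTimeMillis();
+   * }</pre>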
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not
+   * supported.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + int getDataFormatValue(); + + /** + * + * + *
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not
+   * supported.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + com.google.cloud.bigquery.storage.v1.DataFormat getDataFormat(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1.AvroSchema getAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.AvroSchemaOrBuilder getAvroSchemaOrBuilder(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1.ArrowSchema getArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
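+   *
+   * <p>Since avro_schema and arrow_schema live in the schema oneof, at most one
+   * of them is populated; a sketch of dispatching on the case (constant names
+   * follow standard protobuf oneof codegen, and the handle* methods are
+   * hypothetical):
+   * <pre>{@code
+   * switch (session.getSchemaCase()) {
+   *   case AVRO_SCHEMA:
+   *     handleAvro(session.getAvroSchema());
+   *     break;
+   *   case ARROW_SCHEMA:
+   *     handleArrow(session.getArrowSchema());
+   *     break;
+   *   default:
+   *     break;
+   * }
+   * }</pre>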
+ * + * + * .google.cloud.bigquery.storage.v1.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.ArrowSchemaOrBuilder getArrowSchemaOrBuilder(); + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+   * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+   * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + boolean hasTableModifiers(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers getTableModifiers(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + boolean hasReadOptions(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions getReadOptions(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
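+   *
+   * <p>A sketch of fanning streams out to readers; each stream name is intended
+   * to be passed to a separate ReadRows call (reader loop elided):
+   * <pre>{@code
+   * for (ReadStream stream : session.getStreamsList()) {
+   *   String streamName = stream.getName();
+   * }
+   * }</pre>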
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getStreamsList(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.ReadStream getStreams(int index); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getStreamsCount(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getStreamsOrBuilderList(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getStreamsOrBuilder(int index); + + /** + * + * + *
+   * Output only. An estimate of the number of bytes this session will scan when
+   * all streams are completely consumed. This estimate is based on table
+   * metadata, which might be incomplete or stale.
+   * 
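+   *
+   * <p>Sketch: the estimate can seed coarse progress reporting, with the caveat
+   * above that it may be incomplete or stale (bytesReadSoFar is a hypothetical
+   * counter maintained by the caller):
+   * <pre>{@code
+   * long total = session.getEstimatedTotalBytesScanned();
+   * double progress = total > 0 ? (double) bytesReadSoFar / total : 0.0;
+   * }</pre>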
+ * + * int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalBytesScanned. + */ + long getEstimatedTotalBytesScanned(); + + /** + * + * + *
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table.  This field is only set for
+   * BigLake tables.
+   * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalPhysicalFileSize. + */ + long getEstimatedTotalPhysicalFileSize(); + + /** + * + * + *
+   * Output only. An estimate of the number of rows present in this session's
+   * streams. This estimate is based on table metadata, which might be
+   * incomplete or stale.
+   * 
+ * + * int64 estimated_row_count = 14 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The estimatedRowCount. + */ + long getEstimatedRowCount(); + + /** + * + * + *
+   * Optional. ID set by the client to annotate a session identity. This does
+   * not need to be strictly unique; rather, the same ID should be used to
+   * group logically connected sessions (e.g., use the same ID for all the
+   * sessions needed to complete a Spark SQL query).
+   *
+   * Maximum length is 256 bytes.
+   * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. ID set by the client to annotate a session identity. This does
+   * not need to be strictly unique; rather, the same ID should be used to
+   * group logically connected sessions (e.g., use the same ID for all the
+   * sessions needed to complete a Spark SQL query).
+   *
+   * Maximum length is 256 bytes.
+   * 
+ * + * string trace_id = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); + + com.google.cloud.bigquery.storage.v1.ReadSession.SchemaCase getSchemaCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java new file mode 100644 index 000000000000..66249ea3bb9d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java @@ -0,0 +1,645 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Information about a single stream that gets data out of the storage system.
+ * Most of the information about `ReadStream` instances is aggregated, making
+ * `ReadStream` lightweight.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadStream} + */ +public final class ReadStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ReadStream) + ReadStreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadStream.newBuilder() to construct. + private ReadStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadStream() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadStream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadStream.class, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
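+   *
+   * <p>Sketch: the returned name can be decomposed with the ReadStreamName
+   * helper defined alongside this class:
+   * <pre>{@code
+   * ReadStreamName parsed = ReadStreamName.parse(stream.getName());
+   * String sessionId = parsed.getSession();
+   * }</pre>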
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ReadStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ReadStream other = + (com.google.cloud.bigquery.storage.v1.ReadStream) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + 
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ReadStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a single stream that gets data out of the storage system.
+   * Most of the information about `ReadStream` instances is aggregated, making
+   * `ReadStream` lightweight.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ReadStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ReadStream) + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ReadStream.class, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ReadStream.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream build() { + com.google.cloud.bigquery.storage.v1.ReadStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream buildPartial() { + com.google.cloud.bigquery.storage.v1.ReadStream result = + new com.google.cloud.bigquery.storage.v1.ReadStream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ReadStream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ReadStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ReadStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ReadStream other) { + if (other == com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ReadStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ReadStream) + private static final com.google.cloud.bigquery.storage.v1.ReadStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ReadStream(); + } + + public static com.google.cloud.bigquery.storage.v1.ReadStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java new file mode 100644 index 000000000000..d35aab6c7fde --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java @@ -0,0 +1,257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class ReadStreamName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_SESSION_STREAM = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String session; + private final String stream; + + @Deprecated + protected ReadStreamName() { + project = null; + location = null; + session = null; + stream = null; + } + + private ReadStreamName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + session = Preconditions.checkNotNull(builder.getSession()); + stream = Preconditions.checkNotNull(builder.getStream()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getSession() { + return session; + } + + public String getStream() { + return stream; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ReadStreamName of(String project, String location, String session, String stream) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setSession(session) + .setStream(stream) + .build(); + } + + public static String format(String project, String location, String session, String stream) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setSession(session) + .setStream(stream) + .build() + .toString(); + } + + public static ReadStreamName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_SESSION_STREAM.validatedMatch( + formattedString, "ReadStreamName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("location"), + matchMap.get("session"), + matchMap.get("stream")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ReadStreamName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_SESSION_STREAM.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + 
fieldMapBuilder.put("location", location); + } + if (session != null) { + fieldMapBuilder.put("session", session); + } + if (stream != null) { + fieldMapBuilder.put("stream", stream); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_SESSION_STREAM.instantiate( + "project", project, "location", location, "session", session, "stream", stream); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ReadStreamName that = ((ReadStreamName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.session, that.session) + && Objects.equals(this.stream, that.stream); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(session); + h *= 1000003; + h ^= Objects.hashCode(stream); + return h; + } + + /** Builder for projects/{project}/locations/{location}/sessions/{session}/streams/{stream}. */ + public static class Builder { + private String project; + private String location; + private String session; + private String stream; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getSession() { + return session; + } + + public String getStream() { + return stream; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setSession(String session) { + this.session = session; + return this; + } + + public Builder setStream(String stream) { + this.stream = stream; + return this; + } + + private Builder(ReadStreamName readStreamName) { + this.project = readStreamName.project; + this.location = readStreamName.location; + this.session = readStreamName.session; + this.stream = readStreamName.stream; + } + + public ReadStreamName build() { + return new ReadStreamName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java new file mode 100644 index 000000000000..1d5a670eb2f4 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ReadStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ReadStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java new file mode 100644 index 000000000000..70ecc0bde50b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java @@ -0,0 +1,1029 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * The message that presents row-level error info in a request.
+ * 
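+ *
+ * <p>A sketch of logging row errors returned by an append; the rowErrors list is
+ * a hypothetical stand-in for wherever the response surfaces them, while the
+ * getters are defined on this message:
+ * <pre>{@code
+ * for (RowError rowError : rowErrors) {
+ *   System.err.printf(
+ *       "row %d failed with %s: %s%n",
+ *       rowError.getIndex(), rowError.getCode(), rowError.getMessage());
+ * }
+ * }</pre>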
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.RowError} + */ +public final class RowError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.RowError) + RowErrorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use RowError.newBuilder() to construct. + private RowError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private RowError() { + code_ = 0; + message_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new RowError(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.RowError.class, + com.google.cloud.bigquery.storage.v1.RowError.Builder.class); + } + + /** + * + * + *
+   * Error code for `RowError`.
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.RowError.RowErrorCode} + */ + public enum RowErrorCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Default error.
+     * 
+ * + * ROW_ERROR_CODE_UNSPECIFIED = 0; + */ + ROW_ERROR_CODE_UNSPECIFIED(0), + /** + * + * + *
+     * One or more fields in the row have errors.
+     * 
+ * + * FIELDS_ERROR = 1; + */ + FIELDS_ERROR(1), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Default error.
+     * 
+ * + * ROW_ERROR_CODE_UNSPECIFIED = 0; + */ + public static final int ROW_ERROR_CODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * One or more fields in the row have errors.
+     * 
+ * + * FIELDS_ERROR = 1; + */ + public static final int FIELDS_ERROR_VALUE = 1; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowErrorCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static RowErrorCode forNumber(int value) { + switch (value) { + case 0: + return ROW_ERROR_CODE_UNSPECIFIED; + case 1: + return FIELDS_ERROR; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RowErrorCode findValueByNumber(int number) { + return RowErrorCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.RowError.getDescriptor().getEnumTypes().get(0); + } + + private static final RowErrorCode[] VALUES = values(); + + public static RowErrorCode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private RowErrorCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.RowError.RowErrorCode) + } + + public static final int INDEX_FIELD_NUMBER = 1; + private long index_ = 0L; + + /** + * + * + *
+   * Index of the malformed row in the request.
+   * 
+ * + * int64 index = 1; + * + * @return The index. + */ + @java.lang.Override + public long getIndex() { + return index_; + } + + public static final int CODE_FIELD_NUMBER = 2; + private int code_ = 0; + + /** + * + * + *
+   * Structured error reason for a row error.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
+   * Structured error reason for a row error.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode getCode() { + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode result = + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.forNumber(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.UNRECOGNIZED + : result; + } + + public static final int MESSAGE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object message_ = ""; + + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * 
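+   * <p>A minimal sketch of building and reading a {@code RowError}, using only
+   * the builder and accessors defined in this file (the index, code, and
+   * message below are illustrative values):
+   * <pre>{@code
+   * RowError error =
+   *     RowError.newBuilder()
+   *         .setIndex(2L)
+   *         .setCode(RowError.RowErrorCode.FIELDS_ERROR)
+   *         .setMessage("field 'age' could not be parsed")
+   *         .build();
+   * long badRow = error.getIndex();
+   * }</pre>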
+ * + * string message = 3; + * + * @return The message. + */ + @java.lang.Override + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + message_ = s; + return s; + } + } + + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * 
+ * + * string message = 3; + * + * @return The bytes for message. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (index_ != 0L) { + output.writeInt64(1, index_); + } + if (code_ + != com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.ROW_ERROR_CODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(message_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, message_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (index_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, index_); + } + if (code_ + != com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.ROW_ERROR_CODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(message_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, message_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.RowError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.RowError other = + (com.google.cloud.bigquery.storage.v1.RowError) obj; + + if (getIndex() != other.getIndex()) return false; + if (code_ != other.code_) return false; + if (!getMessage().equals(other.getMessage())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIndex()); + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + code_; + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.RowError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The message that presents row-level error info in a request.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.RowError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.RowError) + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.RowError.class, + com.google.cloud.bigquery.storage.v1.RowError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.RowError.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + index_ = 0L; + code_ = 0; + message_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError build() { + com.google.cloud.bigquery.storage.v1.RowError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError buildPartial() { + com.google.cloud.bigquery.storage.v1.RowError result = + new com.google.cloud.bigquery.storage.v1.RowError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.RowError result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.index_ = index_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.code_ = code_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.message_ = message_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.RowError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.RowError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.RowError other) { + if (other == com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance()) return this; + if (other.getIndex() != 0L) { + setIndex(other.getIndex()); + } + if (other.code_ != 0) { + setCodeValue(other.getCodeValue()); + } + if (!other.getMessage().isEmpty()) { + message_ = other.message_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + index_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + code_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + message_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long index_; + + /** + * + * + *
+     * Index of the malformed row in the request.
+     * 
+ * + * int64 index = 1; + * + * @return The index. + */ + @java.lang.Override + public long getIndex() { + return index_; + } + + /** + * + * + *
+     * Index of the malformed row in the request.
+     * 
+ * + * int64 index = 1; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(long value) { + + index_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Index of the malformed row in the request.
+     * 
+ * + * int64 index = 1; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + bitField0_ = (bitField0_ & ~0x00000001); + index_ = 0L; + onChanged(); + return this; + } + + private int code_ = 0; + + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @param value The enum numeric value on the wire for code to set. + * @return This builder for chaining. + */ + public Builder setCodeValue(int value) { + code_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode getCode() { + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode result = + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.forNumber(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @param value The code to set. + * @return This builder for chaining. + */ + public Builder setCode(com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + code_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return This builder for chaining. + */ + public Builder clearCode() { + bitField0_ = (bitField0_ & ~0x00000002); + code_ = 0; + onChanged(); + return this; + } + + private java.lang.Object message_ = ""; + + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @return The message. + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @return The bytes for message. + */ + public com.google.protobuf.ByteString getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @param value The message to set. + * @return This builder for chaining. + */ + public Builder setMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + message_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @return This builder for chaining. + */ + public Builder clearMessage() { + message_ = getDefaultInstance().getMessage(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @param value The bytes for message to set. + * @return This builder for chaining. + */ + public Builder setMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + message_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.RowError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.RowError) + private static final com.google.cloud.bigquery.storage.v1.RowError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.RowError(); + } + + public static com.google.cloud.bigquery.storage.v1.RowError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RowError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java new file mode 100644 index 000000000000..59f352e455d9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface RowErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.RowError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Index of the malformed row in the request.
+   * 
+ * + * int64 index = 1; + * + * @return The index. + */ + long getIndex(); + + /** + * + * + *
+   * Structured error reason for a row error.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The enum numeric value on the wire for code. + */ + int getCodeValue(); + + /** + * + * + *
+   * Structured error reason for a row error.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The code. + */ + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode getCode(); + + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * 
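+   * <p>Row errors are typically consumed from an append response; a sketch,
+   * assuming the companion {@code AppendRowsResponse} message in this package
+   * ({@code response} stands for such an instance, and its
+   * {@code getRowErrorsList()} accessor is not shown in this change):
+   * <pre>{@code
+   * for (RowError e : response.getRowErrorsList()) {
+   *   System.err.println("row " + e.getIndex() + ": " + e.getMessage());
+   * }
+   * }</pre>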
+ * + * string message = 3; + * + * @return The message. + */ + java.lang.String getMessage(); + + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * 
+ * + * string message = 3; + * + * @return The bytes for message. + */ + com.google.protobuf.ByteString getMessageBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java new file mode 100644 index 000000000000..a5ed7619c01a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java @@ -0,0 +1,774 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Request message for `SplitReadStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.SplitReadStreamRequest} + */ +public final class SplitReadStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.SplitReadStreamRequest) + SplitReadStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SplitReadStreamRequest.newBuilder() to construct. + private SplitReadStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FRACTION_FIELD_NUMBER = 2; + private double fraction_ = 0D; + + /** + * + * + *
+   * A value in the range (0.0, 1.0) that specifies the fractional point at
+   * which the original stream should be split. The actual split point is
+   * evaluated on pre-filtered rows, so if a filter is provided, then there is
+   * no guarantee that the division of the rows between the new child streams
+   * will be proportional to this fractional value. Additionally, because the
+   * server-side unit for assigning data is collections of rows, this fraction
+   * will always map to a data storage boundary on the server side.
+   * 
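+   * <p>A minimal request-building sketch, using only the builder defined in
+   * this file; the stream name below is a placeholder in the usual
+   * {@code ReadStream} resource format:
+   * <pre>{@code
+   * SplitReadStreamRequest request =
+   *     SplitReadStreamRequest.newBuilder()
+   *         .setName("projects/p/locations/l/sessions/s/streams/st")
+   *         .setFraction(0.5)
+   *         .build();
+   * }</pre>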
+ * + * double fraction = 2; + * + * @return The fraction. + */ + @java.lang.Override + public double getFraction() { + return fraction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (java.lang.Double.doubleToRawLongBits(fraction_) != 0) { + output.writeDouble(2, fraction_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (java.lang.Double.doubleToRawLongBits(fraction_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, fraction_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest other = + (com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (java.lang.Double.doubleToLongBits(getFraction()) + != java.lang.Double.doubleToLongBits(other.getFraction())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + FRACTION_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getFraction())); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `SplitReadStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.SplitReadStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.SplitReadStreamRequest) + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + fraction_ = 0D; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest build() { + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest result = + new com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fraction_ = fraction_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + 
return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest other) { + if (other == com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getFraction() != 0D) { + setFraction(other.getFraction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 17: + { + fraction_ = input.readDouble(); + bitField0_ |= 0x00000002; + break; + } // case 17 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private double fraction_; + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @return The fraction. + */ + @java.lang.Override + public double getFraction() { + return fraction_; + } + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @param value The fraction to set. + * @return This builder for chaining. + */ + public Builder setFraction(double value) { + + fraction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @return This builder for chaining. + */ + public Builder clearFraction() { + bitField0_ = (bitField0_ & ~0x00000002); + fraction_ = 0D; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.SplitReadStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.SplitReadStreamRequest) + private static final com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java new file mode 100644 index 000000000000..efe759bfe9e2 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java @@ -0,0 +1,75 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface SplitReadStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.SplitReadStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * A value in the range (0.0, 1.0) that specifies the fractional point at
+   * which the original stream should be split. The actual split point is
+   * evaluated on pre-filtered rows, so if a filter is provided, then there is
+   * no guarantee that the division of the rows between the new child streams
+   * will be proportional to this fractional value. Additionally, because the
+   * server-side unit for assigning data is collections of rows, this fraction
+   * will always map to a data storage boundary on the server side.
+   * 
+ * + * double fraction = 2; + * + * @return The fraction. + */ + double getFraction(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java new file mode 100644 index 000000000000..fe1bcbff545a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java @@ -0,0 +1,1062 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Response message for `SplitReadStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.SplitReadStreamResponse} + */ +public final class SplitReadStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.SplitReadStreamResponse) + SplitReadStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SplitReadStreamResponse.newBuilder() to construct. + private SplitReadStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.Builder.class); + } + + private int bitField0_; + public static final int PRIMARY_STREAM_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1.ReadStream primaryStream_; + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
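+   * <p>A minimal handling sketch, using only accessors defined in this file
+   * ({@code response} stands for a received {@code SplitReadStreamResponse});
+   * both halves should be checked, since an unsplittable stream yields
+   * neither:
+   * <pre>{@code
+   * if (response.hasPrimaryStream() && response.hasRemainderStream()) {
+   *   ReadStream primary = response.getPrimaryStream();
+   *   ReadStream remainder = response.getRemainderStream();
+   * } else {
+   *   // The original stream could not be split further.
+   * }
+   * }</pre>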
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + @java.lang.Override + public boolean hasPrimaryStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream getPrimaryStream() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : primaryStream_; + } + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getPrimaryStreamOrBuilder() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : primaryStream_; + } + + public static final int REMAINDER_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.ReadStream remainderStream_; + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + @java.lang.Override + public boolean hasRemainderStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStream getRemainderStream() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : remainderStream_; + } + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getRemainderStreamOrBuilder() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : remainderStream_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getPrimaryStream()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getRemainderStream()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPrimaryStream()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRemainderStream()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse other = + (com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse) obj; + + if (hasPrimaryStream() != other.hasPrimaryStream()) return false; + if (hasPrimaryStream()) { + if (!getPrimaryStream().equals(other.getPrimaryStream())) return false; + } + if (hasRemainderStream() != other.hasRemainderStream()) return false; + if (hasRemainderStream()) { + if (!getRemainderStream().equals(other.getRemainderStream())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPrimaryStream()) { + hash = (37 * hash) + PRIMARY_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getPrimaryStream().hashCode(); + } + if (hasRemainderStream()) { + hash = (37 * hash) + REMAINDER_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getRemainderStream().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `SplitReadStream`.
+   * 
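+   *
+   * <p>A minimal hand-written sketch (not generated code) of constructing a
+   * response through this builder, e.g. for a test; the stream names are
+   * placeholders:
+   *
+   * <pre>{@code
+   * SplitReadStreamResponse response =
+   *     SplitReadStreamResponse.newBuilder()
+   *         .setPrimaryStream(ReadStream.newBuilder().setName("streams/primary").build())
+   *         .setRemainderStream(ReadStream.newBuilder().setName("streams/remainder").build())
+   *         .build();
+   * }</pre>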
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.SplitReadStreamResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.SplitReadStreamResponse) + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getPrimaryStreamFieldBuilder(); + getRemainderStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + primaryStream_ = null; + if (primaryStreamBuilder_ != null) { + primaryStreamBuilder_.dispose(); + primaryStreamBuilder_ = null; + } + remainderStream_ = null; + if (remainderStreamBuilder_ != null) { + remainderStreamBuilder_.dispose(); + remainderStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse build() { + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse buildPartial() { + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse result = + new com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.primaryStream_ = + primaryStreamBuilder_ == null ? 
primaryStream_ : primaryStreamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.remainderStream_ = + remainderStreamBuilder_ == null ? remainderStream_ : remainderStreamBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse.getDefaultInstance()) + return this; + if (other.hasPrimaryStream()) { + mergePrimaryStream(other.getPrimaryStream()); + } + if (other.hasRemainderStream()) { + mergeRemainderStream(other.getRemainderStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getPrimaryStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getRemainderStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1.ReadStream primaryStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder> + 
primaryStreamBuilder_; + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + public boolean hasPrimaryStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + public com.google.cloud.bigquery.storage.v1.ReadStream getPrimaryStream() { + if (primaryStreamBuilder_ == null) { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : primaryStream_; + } else { + return primaryStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + public Builder setPrimaryStream(com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (primaryStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + primaryStream_ = value; + } else { + primaryStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + public Builder setPrimaryStream( + com.google.cloud.bigquery.storage.v1.ReadStream.Builder builderForValue) { + if (primaryStreamBuilder_ == null) { + primaryStream_ = builderForValue.build(); + } else { + primaryStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + public Builder mergePrimaryStream(com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (primaryStreamBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && primaryStream_ != null + && primaryStream_ + != com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance()) { + getPrimaryStreamBuilder().mergeFrom(value); + } else { + primaryStream_ = value; + } + } else { + primaryStreamBuilder_.mergeFrom(value); + } + if (primaryStream_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + public Builder clearPrimaryStream() { + bitField0_ = (bitField0_ & ~0x00000001); + primaryStream_ = null; + if (primaryStreamBuilder_ != null) { + primaryStreamBuilder_.dispose(); + primaryStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1.ReadStream.Builder getPrimaryStreamBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getPrimaryStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getPrimaryStreamOrBuilder() { + if (primaryStreamBuilder_ != null) { + return primaryStreamBuilder_.getMessageOrBuilder(); + } else { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : primaryStream_; + } + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder> + getPrimaryStreamFieldBuilder() { + if (primaryStreamBuilder_ == null) { + primaryStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder>( + getPrimaryStream(), getParentForChildren(), isClean()); + primaryStream_ = null; + } + return primaryStreamBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.ReadStream remainderStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder> + remainderStreamBuilder_; + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + public boolean hasRemainderStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + public com.google.cloud.bigquery.storage.v1.ReadStream getRemainderStream() { + if (remainderStreamBuilder_ == null) { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : remainderStream_; + } else { + return remainderStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + public Builder setRemainderStream(com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (remainderStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + remainderStream_ = value; + } else { + remainderStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + public Builder setRemainderStream( + com.google.cloud.bigquery.storage.v1.ReadStream.Builder builderForValue) { + if (remainderStreamBuilder_ == null) { + remainderStream_ = builderForValue.build(); + } else { + remainderStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + public Builder mergeRemainderStream(com.google.cloud.bigquery.storage.v1.ReadStream value) { + if (remainderStreamBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && remainderStream_ != null + && remainderStream_ + != com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance()) { + getRemainderStreamBuilder().mergeFrom(value); + } else { + remainderStream_ = value; + } + } else { + remainderStreamBuilder_.mergeFrom(value); + } + if (remainderStream_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + public Builder clearRemainderStream() { + bitField0_ = (bitField0_ & ~0x00000002); + remainderStream_ = null; + if (remainderStreamBuilder_ != null) { + remainderStreamBuilder_.dispose(); + remainderStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1.ReadStream.Builder getRemainderStreamBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRemainderStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getRemainderStreamOrBuilder() { + if (remainderStreamBuilder_ != null) { + return remainderStreamBuilder_.getMessageOrBuilder(); + } else { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1.ReadStream.getDefaultInstance() + : remainderStream_; + } + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder> + getRemainderStreamFieldBuilder() { + if (remainderStreamBuilder_ == null) { + remainderStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.ReadStream, + com.google.cloud.bigquery.storage.v1.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder>( + getRemainderStream(), getParentForChildren(), isClean()); + remainderStream_ = null; + } + return remainderStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.SplitReadStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.SplitReadStreamResponse) + private static final com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java new file mode 100644 index 000000000000..824e8a5ea1d9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java @@ -0,0 +1,109 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface SplitReadStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.SplitReadStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
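+   *
+   * <p>Hand-written sketch (an illustration, not part of the generated API):
+   * helpers that only read a response can accept this interface, which both
+   * {@code SplitReadStreamResponse} and its {@code Builder} implement:
+   *
+   * <pre>{@code
+   * static boolean splitSucceeded(SplitReadStreamResponseOrBuilder r) {
+   *   return r.hasPrimaryStream() && r.hasRemainderStream();
+   * }
+   * }</pre>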
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + boolean hasPrimaryStream(); + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + com.google.cloud.bigquery.storage.v1.ReadStream getPrimaryStream(); + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream primary_stream = 1; + */ + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getPrimaryStreamOrBuilder(); + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + boolean hasRemainderStream(); + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + com.google.cloud.bigquery.storage.v1.ReadStream getRemainderStream(); + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.ReadStream remainder_stream = 2; + */ + com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getRemainderStreamOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java new file mode 100644 index 000000000000..38bf9873ab97 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java @@ -0,0 +1,1446 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Structured custom BigQuery Storage error message. The error can be attached
+ * as error details in the returned rpc Status. In particular, the use of error
+ * codes allows more structured error handling, and reduces the need to evaluate
+ * unstructured error text strings.
+ * 
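+ *
+ * <p>Illustrative sketch, hand-written rather than generated; the gRPC helper
+ * shown ({@code StatusProto.fromThrowable}) is an assumption about the
+ * caller's stack, not part of this class. A {@code StorageError} can be
+ * recovered from a failed RPC by unpacking the {@code google.rpc.Status}
+ * details:
+ *
+ * <pre>{@code
+ * com.google.rpc.Status status = io.grpc.protobuf.StatusProto.fromThrowable(t);
+ * if (status != null) {
+ *   for (com.google.protobuf.Any detail : status.getDetailsList()) {
+ *     if (detail.is(StorageError.class)) {
+ *       // Any.unpack throws InvalidProtocolBufferException; handle or propagate.
+ *       StorageError error = detail.unpack(StorageError.class);
+ *       // inspect error.getCode(), error.getEntity(), error.getErrorMessage()
+ *     }
+ *   }
+ * }
+ * }</pre>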
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.StorageError} + */ +public final class StorageError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.StorageError) + StorageErrorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StorageError.newBuilder() to construct. + private StorageError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StorageError() { + code_ = 0; + entity_ = ""; + errorMessage_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StorageError(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StorageError.class, + com.google.cloud.bigquery.storage.v1.StorageError.Builder.class); + } + + /** + * + * + *
+   * Error code for `StorageError`.
+   * 
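+   *
+   * <p>Hand-written example (not generated) of branching on the code, assuming
+   * {@code error} is a {@code StorageError} taken from a failed request; only
+   * a few cases are shown:
+   *
+   * <pre>{@code
+   * switch (error.getCode()) {
+   *   case STREAM_ALREADY_COMMITTED:
+   *     break; // an earlier attempt already committed the stream
+   *   case KMS_SERVICE_ERROR:
+   *     retry(); // hypothetical helper; this code is documented as retryable
+   *     break;
+   *   default:
+   *     throw new IllegalStateException(error.getErrorMessage());
+   * }
+   * }</pre>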
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode} + */ + public enum StorageErrorCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Default error.
+     * 
+ * + * STORAGE_ERROR_CODE_UNSPECIFIED = 0; + */ + STORAGE_ERROR_CODE_UNSPECIFIED(0), + /** + * + * + *
+     * Table is not found in the system.
+     * 
+ * + * TABLE_NOT_FOUND = 1; + */ + TABLE_NOT_FOUND(1), + /** + * + * + *
+     * Stream is already committed.
+     * 
+ * + * STREAM_ALREADY_COMMITTED = 2; + */ + STREAM_ALREADY_COMMITTED(2), + /** + * + * + *
+     * Stream is not found.
+     * 
+ * + * STREAM_NOT_FOUND = 3; + */ + STREAM_NOT_FOUND(3), + /** + * + * + *
+     * Invalid Stream type.
+     * For example, you try to commit a stream that is not pending.
+     * 
+ * + * INVALID_STREAM_TYPE = 4; + */ + INVALID_STREAM_TYPE(4), + /** + * + * + *
+     * Invalid Stream state.
+     * For example, you try to commit a stream that is not finalized or has
+     * been garbage collected.
+     * 
+ * + * INVALID_STREAM_STATE = 5; + */ + INVALID_STREAM_STATE(5), + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + STREAM_FINALIZED(6), + /** + * + * + *
+     * There is a schema mismatch, caused by the user schema having an extra
+     * field that is not present in the BigQuery table schema.
+     * 
+ * + * SCHEMA_MISMATCH_EXTRA_FIELDS = 7; + */ + SCHEMA_MISMATCH_EXTRA_FIELDS(7), + /** + * + * + *
+     * Offset already exists.
+     * 
+ * + * OFFSET_ALREADY_EXISTS = 8; + */ + OFFSET_ALREADY_EXISTS(8), + /** + * + * + *
+     * Offset out of range.
+     * 
+ * + * OFFSET_OUT_OF_RANGE = 9; + */ + OFFSET_OUT_OF_RANGE(9), + /** + * + * + *
+     * Customer-managed encryption key (CMEK) not provided for CMEK-enabled
+     * data.
+     * 
+ * + * CMEK_NOT_PROVIDED = 10; + */ + CMEK_NOT_PROVIDED(10), + /** + * + * + *
+     * Customer-managed encryption key (CMEK) was incorrectly provided.
+     * 
+ * + * INVALID_CMEK_PROVIDED = 11; + */ + INVALID_CMEK_PROVIDED(11), + /** + * + * + *
+     * There is an encryption error while using the customer-managed encryption key.
+     * 
+ * + * CMEK_ENCRYPTION_ERROR = 12; + */ + CMEK_ENCRYPTION_ERROR(12), + /** + * + * + *
+     * The Key Management Service (KMS) returned an error, which can be
+     * retried.
+     * 
+ * + * KMS_SERVICE_ERROR = 13; + */ + KMS_SERVICE_ERROR(13), + /** + * + * + *
+     * Permission denied while using customer-managed encryption key.
+     * 
+ * + * KMS_PERMISSION_DENIED = 14; + */ + KMS_PERMISSION_DENIED(14), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Default error.
+     * 
+ * + * STORAGE_ERROR_CODE_UNSPECIFIED = 0; + */ + public static final int STORAGE_ERROR_CODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Table is not found in the system.
+     * 
+ * + * TABLE_NOT_FOUND = 1; + */ + public static final int TABLE_NOT_FOUND_VALUE = 1; + + /** + * + * + *
+     * Stream is already committed.
+     * 
+ * + * STREAM_ALREADY_COMMITTED = 2; + */ + public static final int STREAM_ALREADY_COMMITTED_VALUE = 2; + + /** + * + * + *
+     * Stream is not found.
+     * 
+ * + * STREAM_NOT_FOUND = 3; + */ + public static final int STREAM_NOT_FOUND_VALUE = 3; + + /** + * + * + *
+     * Invalid Stream type.
+     * For example, you try to commit a stream that is not pending.
+     * 
+ * + * INVALID_STREAM_TYPE = 4; + */ + public static final int INVALID_STREAM_TYPE_VALUE = 4; + + /** + * + * + *
+     * Invalid Stream state.
+     * For example, you try to commit a stream that is not finalized or has
+     * been garbage collected.
+     * 
+ * + * INVALID_STREAM_STATE = 5; + */ + public static final int INVALID_STREAM_STATE_VALUE = 5; + + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + public static final int STREAM_FINALIZED_VALUE = 6; + + /** + * + * + *
+     * There is a schema mismatch, caused by the user schema having an extra
+     * field that is not present in the BigQuery table schema.
+     * 
+ * + * SCHEMA_MISMATCH_EXTRA_FIELDS = 7; + */ + public static final int SCHEMA_MISMATCH_EXTRA_FIELDS_VALUE = 7; + + /** + * + * + *
+     * Offset already exists.
+     * 
+ * + * OFFSET_ALREADY_EXISTS = 8; + */ + public static final int OFFSET_ALREADY_EXISTS_VALUE = 8; + + /** + * + * + *
+     * Offset out of range.
+     * 
+ * + * OFFSET_OUT_OF_RANGE = 9; + */ + public static final int OFFSET_OUT_OF_RANGE_VALUE = 9; + + /** + * + * + *
+     * Customer-managed encryption key (CMEK) not provided for CMEK-enabled
+     * data.
+     * 
+ * + * CMEK_NOT_PROVIDED = 10; + */ + public static final int CMEK_NOT_PROVIDED_VALUE = 10; + + /** + * + * + *
+     * Customer-managed encryption key (CMEK) was incorrectly provided.
+     * 
+ * + * INVALID_CMEK_PROVIDED = 11; + */ + public static final int INVALID_CMEK_PROVIDED_VALUE = 11; + + /** + * + * + *
+     * There is an encryption error while using the customer-managed encryption key.
+     * 
+ * + * CMEK_ENCRYPTION_ERROR = 12; + */ + public static final int CMEK_ENCRYPTION_ERROR_VALUE = 12; + + /** + * + * + *
+     * The Key Management Service (KMS) returned an error, which can be
+     * retried.
+     * 
+ * + * KMS_SERVICE_ERROR = 13; + */ + public static final int KMS_SERVICE_ERROR_VALUE = 13; + + /** + * + * + *
+     * Permission denied while using customer-managed encryption key.
+     * 
+ * + * KMS_PERMISSION_DENIED = 14; + */ + public static final int KMS_PERMISSION_DENIED_VALUE = 14; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static StorageErrorCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static StorageErrorCode forNumber(int value) { + switch (value) { + case 0: + return STORAGE_ERROR_CODE_UNSPECIFIED; + case 1: + return TABLE_NOT_FOUND; + case 2: + return STREAM_ALREADY_COMMITTED; + case 3: + return STREAM_NOT_FOUND; + case 4: + return INVALID_STREAM_TYPE; + case 5: + return INVALID_STREAM_STATE; + case 6: + return STREAM_FINALIZED; + case 7: + return SCHEMA_MISMATCH_EXTRA_FIELDS; + case 8: + return OFFSET_ALREADY_EXISTS; + case 9: + return OFFSET_OUT_OF_RANGE; + case 10: + return CMEK_NOT_PROVIDED; + case 11: + return INVALID_CMEK_PROVIDED; + case 12: + return CMEK_ENCRYPTION_ERROR; + case 13: + return KMS_SERVICE_ERROR; + case 14: + return KMS_PERMISSION_DENIED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public StorageErrorCode findValueByNumber(int number) { + return StorageErrorCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageError.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final StorageErrorCode[] VALUES = values(); + + public static StorageErrorCode valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private StorageErrorCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode) + } + + public static final int CODE_FIELD_NUMBER = 1; + private int code_ = 0; + + /** + * + * + *
+   * BigQuery Storage specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
+   * BigQuery Storage specific error code.
+   * 
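+   *
+   * <p>Hand-written note with a small sketch (an illustration, not generated
+   * code): a message parsed from a newer server may carry an enum number this
+   * client does not know, in which case {@code getCode()} returns
+   * {@code UNRECOGNIZED} while {@code getCodeValue()} still exposes the raw
+   * wire number:
+   *
+   * <pre>{@code
+   * if (error.getCode() == StorageError.StorageErrorCode.UNRECOGNIZED) {
+   *   int rawCode = error.getCodeValue(); // numeric value from the wire
+   *   // log rawCode and fall back to error.getErrorMessage()
+   * }
+   * }</pre>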
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode getCode() { + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode result = + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.forNumber(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.UNRECOGNIZED + : result; + } + + public static final int ENTITY_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object entity_ = ""; + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The entity. + */ + @java.lang.Override + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } + } + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ERROR_MESSAGE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + @java.lang.Override + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } + } + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (code_ + != com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode + .STORAGE_ERROR_CODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(entity_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, entity_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, errorMessage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (code_ + != com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode + .STORAGE_ERROR_CODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(entity_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, entity_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, errorMessage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.StorageError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.StorageError other = + (com.google.cloud.bigquery.storage.v1.StorageError) obj; + + if (code_ != other.code_) return false; + if (!getEntity().equals(other.getEntity())) return false; + if (!getErrorMessage().equals(other.getErrorMessage())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + code_; + hash = (37 * hash) + ENTITY_FIELD_NUMBER; + hash = (53 * hash) + getEntity().hashCode(); + hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getErrorMessage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.StorageError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Structured custom BigQuery Storage error message. The error can be attached
+   * as error details in the returned rpc Status. In particular, the use of error
+   * codes allows more structured error handling, and reduces the need to evaluate
+   * unstructured error text strings.
+   * 
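+   *
+   * <p>A minimal hand-written sketch (not generated) of building a
+   * {@code StorageError}, e.g. as a test fixture; the entity name is a
+   * placeholder:
+   *
+   * <pre>{@code
+   * StorageError error =
+   *     StorageError.newBuilder()
+   *         .setCode(StorageError.StorageErrorCode.STREAM_NOT_FOUND)
+   *         .setEntity("projects/p/datasets/d/tables/t/streams/s")
+   *         .setErrorMessage("stream was not found")
+   *         .build();
+   * }</pre>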
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.StorageError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.StorageError) + com.google.cloud.bigquery.storage.v1.StorageErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StorageError.class, + com.google.cloud.bigquery.storage.v1.StorageError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.StorageError.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + code_ = 0; + entity_ = ""; + errorMessage_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError build() { + com.google.cloud.bigquery.storage.v1.StorageError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError buildPartial() { + com.google.cloud.bigquery.storage.v1.StorageError result = + new com.google.cloud.bigquery.storage.v1.StorageError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.StorageError result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.code_ = code_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.entity_ = entity_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.errorMessage_ = errorMessage_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + 
@java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.StorageError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.StorageError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.StorageError other) { + if (other == com.google.cloud.bigquery.storage.v1.StorageError.getDefaultInstance()) + return this; + if (other.code_ != 0) { + setCodeValue(other.getCodeValue()); + } + if (!other.getEntity().isEmpty()) { + entity_ = other.entity_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getErrorMessage().isEmpty()) { + errorMessage_ = other.errorMessage_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + code_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + entity_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + errorMessage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int code_ = 0; + + /** + * + * + *
+     * BigQuery Storage specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
+     * BigQuery Storage specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @param value The enum numeric value on the wire for code to set. + * @return This builder for chaining. + */ + public Builder setCodeValue(int value) { + code_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * BigQuery Storage specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode getCode() { + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode result = + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.forNumber(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * BigQuery Storage specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @param value The code to set. + * @return This builder for chaining. + */ + public Builder setCode( + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + code_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return This builder for chaining. + */ + public Builder clearCode() { + bitField0_ = (bitField0_ & ~0x00000001); + code_ = 0; + onChanged(); + return this; + } + + private java.lang.Object entity_ = ""; + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return The entity. + */ + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @param value The entity to set. + * @return This builder for chaining. + */ + public Builder setEntity(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entity_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return This builder for chaining. + */ + public Builder clearEntity() { + entity_ = getDefaultInstance().getEntity(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @param value The bytes for entity to set. + * @return This builder for chaining. + */ + public Builder setEntityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entity_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @param value The errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + errorMessage_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return This builder for chaining. + */ + public Builder clearErrorMessage() { + errorMessage_ = getDefaultInstance().getErrorMessage(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @param value The bytes for errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + errorMessage_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.StorageError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.StorageError) + private static final com.google.cloud.bigquery.storage.v1.StorageError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.StorageError(); + } + + public static com.google.cloud.bigquery.storage.v1.StorageError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StorageError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java new file mode 100644 index 000000000000..d737491c6cda --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface StorageErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.StorageError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + int getCodeValue(); + + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode getCode(); + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The entity. + */ + java.lang.String getEntity(); + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + com.google.protobuf.ByteString getEntityBytes(); + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + java.lang.String getErrorMessage(); + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + com.google.protobuf.ByteString getErrorMessageBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java new file mode 100644 index 000000000000..4fa1356a957b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java @@ -0,0 +1,589 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class StorageProto { + private StorageProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ThrottleState_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ThrottleState_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_StreamStats_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_MissingValueInterpretationsEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_MissingValueInterpretationsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n.google/cloud/bigquery/storage/v1/stora" + + "ge.proto\022 google.cloud.bigquery.storage." 
+ + "v1\032\034google/api/annotations.proto\032\027google" + + "/api/client.proto\032\037google/api/field_beha" + + "vior.proto\032\031google/api/resource.proto\032,g" + + "oogle/cloud/bigquery/storage/v1/arrow.pr" + + "oto\032+google/cloud/bigquery/storage/v1/av" + + "ro.proto\032/google/cloud/bigquery/storage/" + + "v1/protobuf.proto\032-google/cloud/bigquery" + + "/storage/v1/stream.proto\032,google/cloud/b" + + "igquery/storage/v1/table.proto\032\037google/p" + + "rotobuf/timestamp.proto\032\036google/protobuf" + + "/wrappers.proto\032\027google/rpc/status.proto" + + "\"\347\001\n\030CreateReadSessionRequest\022C\n\006parent\030" + + "\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.goog" + + "leapis.com/Project\022H\n\014read_session\030\002 \001(\013" + + "2-.google.cloud.bigquery.storage.v1.Read" + + "SessionB\003\340A\002\022\030\n\020max_stream_count\030\003 \001(\005\022\"" + + "\n\032preferred_min_stream_count\030\004 \001(\005\"i\n\017Re" + + "adRowsRequest\022F\n\013read_stream\030\001 \001(\tB1\340A\002\372" + + "A+\n)bigquerystorage.googleapis.com/ReadS" + + "tream\022\016\n\006offset\030\002 \001(\003\")\n\rThrottleState\022\030" + + "\n\020throttle_percent\030\001 \001(\005\"\227\001\n\013StreamStats" + + "\022H\n\010progress\030\002 \001(\01326.google.cloud.bigque" + + "ry.storage.v1.StreamStats.Progress\032>\n\010Pr" + + "ogress\022\031\n\021at_response_start\030\001 \001(\001\022\027\n\017at_" + + "response_end\030\002 \001(\001\"\254\004\n\020ReadRowsResponse\022" + + "?\n\tavro_rows\030\003 \001(\0132*.google.cloud.bigque" + + "ry.storage.v1.AvroRowsH\000\022P\n\022arrow_record" + + "_batch\030\004 \001(\01322.google.cloud.bigquery.sto" + + "rage.v1.ArrowRecordBatchH\000\022\021\n\trow_count\030" + + "\006 \001(\003\022<\n\005stats\030\002 \001(\0132-.google.cloud.bigq" + + "uery.storage.v1.StreamStats\022G\n\016throttle_" + + "state\030\005 \001(\0132/.google.cloud.bigquery.stor" + + "age.v1.ThrottleState\022H\n\013avro_schema\030\007 \001(" + + "\0132,.google.cloud.bigquery.storage.v1.Avr" + + "oSchemaB\003\340A\003H\001\022J\n\014arrow_schema\030\010 \001(\0132-.g" + + "oogle.cloud.bigquery.storage.v1.ArrowSch" + + "emaB\003\340A\003H\001\022(\n\026uncompressed_byte_size\030\t \001" + + "(\003B\003\340A\001H\002\210\001\001B\006\n\004rowsB\010\n\006schemaB\031\n\027_uncom" + + "pressed_byte_size\"k\n\026SplitReadStreamRequ" + + "est\022?\n\004name\030\001 \001(\tB1\340A\002\372A+\n)bigquerystora" + + "ge.googleapis.com/ReadStream\022\020\n\010fraction" + + "\030\002 \001(\001\"\247\001\n\027SplitReadStreamResponse\022D\n\016pr" + + "imary_stream\030\001 \001(\0132,.google.cloud.bigque" + + "ry.storage.v1.ReadStream\022F\n\020remainder_st" + + "ream\030\002 \001(\0132,.google.cloud.bigquery.stora" + + "ge.v1.ReadStream\"\233\001\n\030CreateWriteStreamRe" + + "quest\0225\n\006parent\030\001 \001(\tB%\340A\002\372A\037\n\035bigquery." + + "googleapis.com/Table\022H\n\014write_stream\030\002 \001" + + "(\0132-.google.cloud.bigquery.storage.v1.Wr" + + "iteStreamB\003\340A\002\"\370\010\n\021AppendRowsRequest\022H\n\014" + + "write_stream\030\001 \001(\tB2\340A\002\372A,\n*bigquerystor" + + "age.googleapis.com/WriteStream\022+\n\006offset" + + "\030\002 \001(\0132\033.google.protobuf.Int64Value\022S\n\np" + + "roto_rows\030\004 \001(\0132=.google.cloud.bigquery." 
+ + "storage.v1.AppendRowsRequest.ProtoDataH\000" + + "\022S\n\narrow_rows\030\005 \001(\0132=.google.cloud.bigq" + + "uery.storage.v1.AppendRowsRequest.ArrowD" + + "ataH\000\022\020\n\010trace_id\030\006 \001(\t\022{\n\035missing_value" + + "_interpretations\030\007 \003(\0132T.google.cloud.bi" + + "gquery.storage.v1.AppendRowsRequest.Miss" + + "ingValueInterpretationsEntry\022\201\001\n$default" + + "_missing_value_interpretation\030\010 \001(\0162N.go" + + "ogle.cloud.bigquery.storage.v1.AppendRow" + + "sRequest.MissingValueInterpretationB\003\340A\001" + + "\032\223\001\n\tArrowData\022D\n\rwriter_schema\030\001 \001(\0132-." + + "google.cloud.bigquery.storage.v1.ArrowSc" + + "hema\022@\n\004rows\030\002 \001(\01322.google.cloud.bigque" + + "ry.storage.v1.ArrowRecordBatch\032\214\001\n\tProto" + + "Data\022D\n\rwriter_schema\030\001 \001(\0132-.google.clo" + + "ud.bigquery.storage.v1.ProtoSchema\0229\n\004ro" + + "ws\030\002 \001(\0132+.google.cloud.bigquery.storage" + + ".v1.ProtoRows\032\222\001\n MissingValueInterpreta" + + "tionsEntry\022\013\n\003key\030\001 \001(\t\022]\n\005value\030\002 \001(\0162N" + + ".google.cloud.bigquery.storage.v1.Append" + + "RowsRequest.MissingValueInterpretation:\002" + + "8\001\"m\n\032MissingValueInterpretation\022,\n(MISS" + + "ING_VALUE_INTERPRETATION_UNSPECIFIED\020\000\022\016" + + "\n\nNULL_VALUE\020\001\022\021\n\rDEFAULT_VALUE\020\002B\006\n\004row" + + "s\"\373\002\n\022AppendRowsResponse\022Z\n\rappend_resul" + + "t\030\001 \001(\0132A.google.cloud.bigquery.storage." + + "v1.AppendRowsResponse.AppendResultH\000\022#\n\005" + + "error\030\002 \001(\0132\022.google.rpc.StatusH\000\022E\n\016upd" + + "ated_schema\030\003 \001(\0132-.google.cloud.bigquer" + + "y.storage.v1.TableSchema\022>\n\nrow_errors\030\004" + + " \003(\0132*.google.cloud.bigquery.storage.v1." + + "RowError\022\024\n\014write_stream\030\005 \001(\t\032;\n\014Append" + + "Result\022+\n\006offset\030\001 \001(\0132\033.google.protobuf" + + ".Int64ValueB\n\n\010response\"\232\001\n\025GetWriteStre" + + "amRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquer" + + "ystorage.googleapis.com/WriteStream\022?\n\004v" + + "iew\030\003 \001(\01621.google.cloud.bigquery.storag" + + "e.v1.WriteStreamView\"s\n\036BatchCommitWrite" + + "StreamsRequest\0225\n\006parent\030\001 \001(\tB%\340A\002\372A\037\n\035" + + "bigquery.googleapis.com/Table\022\032\n\rwrite_s" + + "treams\030\002 \003(\tB\003\340A\002\"\231\001\n\037BatchCommitWriteSt" + + "reamsResponse\022/\n\013commit_time\030\001 \001(\0132\032.goo" + + "gle.protobuf.Timestamp\022E\n\rstream_errors\030" + + "\002 \003(\0132..google.cloud.bigquery.storage.v1" + + ".StorageError\"^\n\032FinalizeWriteStreamRequ" + + "est\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystora" + + "ge.googleapis.com/WriteStream\"0\n\033Finaliz" + + "eWriteStreamResponse\022\021\n\trow_count\030\001 \001(\003\"" + + "\211\001\n\020FlushRowsRequest\022H\n\014write_stream\030\001 \001" + + "(\tB2\340A\002\372A,\n*bigquerystorage.googleapis.c" + + "om/WriteStream\022+\n\006offset\030\002 \001(\0132\033.google." + + "protobuf.Int64Value\"#\n\021FlushRowsResponse" + + "\022\016\n\006offset\030\001 \001(\003\"\244\004\n\014StorageError\022M\n\004cod" + + "e\030\001 \001(\0162?.google.cloud.bigquery.storage." 
+ + "v1.StorageError.StorageErrorCode\022\016\n\006enti" + + "ty\030\002 \001(\t\022\025\n\rerror_message\030\003 \001(\t\"\235\003\n\020Stor" + + "ageErrorCode\022\"\n\036STORAGE_ERROR_CODE_UNSPE" + + "CIFIED\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREAM_" + + "ALREADY_COMMITTED\020\002\022\024\n\020STREAM_NOT_FOUND\020" + + "\003\022\027\n\023INVALID_STREAM_TYPE\020\004\022\030\n\024INVALID_ST" + + "REAM_STATE\020\005\022\024\n\020STREAM_FINALIZED\020\006\022 \n\034SC" + + "HEMA_MISMATCH_EXTRA_FIELDS\020\007\022\031\n\025OFFSET_A" + + "LREADY_EXISTS\020\010\022\027\n\023OFFSET_OUT_OF_RANGE\020\t" + + "\022\025\n\021CMEK_NOT_PROVIDED\020\n\022\031\n\025INVALID_CMEK_" + + "PROVIDED\020\013\022\031\n\025CMEK_ENCRYPTION_ERROR\020\014\022\025\n" + + "\021KMS_SERVICE_ERROR\020\r\022\031\n\025KMS_PERMISSION_D" + + "ENIED\020\016\"\263\001\n\010RowError\022\r\n\005index\030\001 \001(\003\022E\n\004c" + + "ode\030\002 \001(\01627.google.cloud.bigquery.storag" + + "e.v1.RowError.RowErrorCode\022\017\n\007message\030\003 " + + "\001(\t\"@\n\014RowErrorCode\022\036\n\032ROW_ERROR_CODE_UN" + + "SPECIFIED\020\000\022\020\n\014FIELDS_ERROR\020\0012\222\006\n\014BigQue" + + "ryRead\022\351\001\n\021CreateReadSession\022:.google.cl" + + "oud.bigquery.storage.v1.CreateReadSessio" + + "nRequest\032-.google.cloud.bigquery.storage" + + ".v1.ReadSession\"i\332A$parent,read_session," + + "max_stream_count\202\323\344\223\002<\"7/v1/{read_sessio" + + "n.table=projects/*/datasets/*/tables/*}:" + + "\001*\022\317\001\n\010ReadRows\0221.google.cloud.bigquery." + + "storage.v1.ReadRowsRequest\0322.google.clou" + + "d.bigquery.storage.v1.ReadRowsResponse\"Z" + + "\332A\022read_stream,offset\202\323\344\223\002?\022=/v1/{read_s" + + "tream=projects/*/locations/*/sessions/*/" + + "streams/*}0\001\022\306\001\n\017SplitReadStream\0228.googl" + + "e.cloud.bigquery.storage.v1.SplitReadStr" + + "eamRequest\0329.google.cloud.bigquery.stora" + + "ge.v1.SplitReadStreamResponse\">\202\323\344\223\0028\0226/" + + "v1/{name=projects/*/locations/*/sessions" + + "/*/streams/*}\032{\312A\036bigquerystorage.google" + + "apis.com\322AWhttps://www.googleapis.com/au" + + "th/bigquery,https://www.googleapis.com/a" + + "uth/cloud-platform2\274\013\n\rBigQueryWrite\022\327\001\n" + + "\021CreateWriteStream\022:.google.cloud.bigque" + + "ry.storage.v1.CreateWriteStreamRequest\032-" + + ".google.cloud.bigquery.storage.v1.WriteS" + + "tream\"W\332A\023parent,write_stream\202\323\344\223\002;\"+/v1" + + "/{parent=projects/*/datasets/*/tables/*}" + + ":\014write_stream\022\322\001\n\nAppendRows\0223.google.c" + + "loud.bigquery.storage.v1.AppendRowsReque" + + "st\0324.google.cloud.bigquery.storage.v1.Ap" + + "pendRowsResponse\"U\332A\014write_stream\202\323\344\223\002@\"" + + ";/v1/{write_stream=projects/*/datasets/*" + + "/tables/*/streams/*}:\001*(\0010\001\022\277\001\n\016GetWrite" + + "Stream\0227.google.cloud.bigquery.storage.v" + + "1.GetWriteStreamRequest\032-.google.cloud.b" + + "igquery.storage.v1.WriteStream\"E\332A\004name\202" + + "\323\344\223\0028\"3/v1/{name=projects/*/datasets/*/t" + + "ables/*/streams/*}:\001*\022\331\001\n\023FinalizeWriteS" + + "tream\022<.google.cloud.bigquery.storage.v1" + + ".FinalizeWriteStreamRequest\032=.google.clo" + + "ud.bigquery.storage.v1.FinalizeWriteStre" + + "amResponse\"E\332A\004name\202\323\344\223\0028\"3/v1/{name=pro" + + "jects/*/datasets/*/tables/*/streams/*}:\001" + + "*\022\334\001\n\027BatchCommitWriteStreams\022@.google.c" + + 
"loud.bigquery.storage.v1.BatchCommitWrit" + + "eStreamsRequest\032A.google.cloud.bigquery." + + "storage.v1.BatchCommitWriteStreamsRespon" + + "se\"<\332A\006parent\202\323\344\223\002-\022+/v1/{parent=project" + + "s/*/datasets/*/tables/*}\022\313\001\n\tFlushRows\0222" + + ".google.cloud.bigquery.storage.v1.FlushR" + + "owsRequest\0323.google.cloud.bigquery.stora" + + "ge.v1.FlushRowsResponse\"U\332A\014write_stream" + + "\202\323\344\223\002@\";/v1/{write_stream=projects/*/dat" + + "asets/*/tables/*/streams/*}:\001*\032\260\001\312A\036bigq" + + "uerystorage.googleapis.com\322A\213\001https://ww" + + "w.googleapis.com/auth/bigquery,https://w" + + "ww.googleapis.com/auth/bigquery.insertda" + + "ta,https://www.googleapis.com/auth/cloud" + + "-platformB\224\002\n$com.google.cloud.bigquery." + + "storage.v1B\014StorageProtoP\001Z>cloud.google" + + ".com/go/bigquery/storage/apiv1/storagepb" + + ";storagepb\252\002 Google.Cloud.BigQuery.Stora" + + "ge.V1\312\002 Google\\Cloud\\BigQuery\\Storage\\V1" + + "\352AU\n\035bigquery.googleapis.com/Table\0224proj" + + "ects/{project}/datasets/{dataset}/tables" + + "/{table}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.ProtoBufProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.StreamProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.protobuf.WrappersProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor, + new java.lang.String[] { + "Parent", "ReadSession", "MaxStreamCount", "PreferredMinStreamCount", + }); + internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor, + new java.lang.String[] { + "ReadStream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_ThrottleState_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1_ThrottleState_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ThrottleState_descriptor, + new java.lang.String[] { + "ThrottlePercent", + }); + internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1_StreamStats_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor, + new java.lang.String[] { + "Progress", + }); + internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_descriptor = + internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_descriptor, + new java.lang.String[] { + "AtResponseStart", "AtResponseEnd", + }); + internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ReadRowsResponse_descriptor, + new java.lang.String[] { + "AvroRows", + "ArrowRecordBatch", + "RowCount", + "Stats", + "ThrottleState", + "AvroSchema", + "ArrowSchema", + "UncompressedByteSize", + "Rows", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamRequest_descriptor, + new java.lang.String[] { + "Name", "Fraction", + }); + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_SplitReadStreamResponse_descriptor, + new java.lang.String[] { + "PrimaryStream", "RemainderStream", + }); + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_CreateWriteStreamRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStream", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", + "Offset", + "ProtoRows", + "ArrowRows", + "TraceId", + "MissingValueInterpretations", + "DefaultMissingValueInterpretation", + "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_descriptor = + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ArrowData_descriptor, + new java.lang.String[] { + "WriterSchema", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor = + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor + .getNestedTypes() + .get(1); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_ProtoData_descriptor, + new java.lang.String[] { + "WriterSchema", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_MissingValueInterpretationsEntry_descriptor = + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_descriptor + .getNestedTypes() + .get(2); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_MissingValueInterpretationsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsRequest_MissingValueInterpretationsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor, + new java.lang.String[] { + "AppendResult", "Error", "UpdatedSchema", "RowErrors", "WriteStream", "Response", + }); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor = + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor, + new java.lang.String[] { + "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_GetWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", "View", + }); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStreams", + }); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_BatchCommitWriteStreamsResponse_descriptor, + new 
java.lang.String[] { + "CommitTime", "StreamErrors", + }); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FinalizeWriteStreamResponse_descriptor, + new java.lang.String[] { + "RowCount", + }); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FlushRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_FlushRowsResponse_descriptor, + new java.lang.String[] { + "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor, + new java.lang.String[] { + "Code", "Entity", "ErrorMessage", + }); + internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor, + new java.lang.String[] { + "Index", "Code", "Message", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(); + 
com.google.cloud.bigquery.storage.v1.ProtoBufProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.StreamProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.WrappersProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java new file mode 100644 index 000000000000..eeee6336b3b3 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java @@ -0,0 +1,238 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class StreamProto { + private StreamProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ReadSession_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_ReadStream_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "-google/cloud/bigquery/storage/v1/stream.proto\022 google.cloud.bigquery.storage.v" + + "1\032\037google/api/field_behavior.proto\032\031goog" + + "le/api/resource.proto\032,google/cloud/bigquery/storage/v1/arrow.proto\032+google/clou" + + "d/bigquery/storage/v1/avro.proto\032,google" + + "/cloud/bigquery/storage/v1/table.proto\032\037google/protobuf/timestamp.proto\"\303\014\n" + + "\013ReadSession\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003\0224\n" + + "\013expire_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022F\n" + + "\013data_format\030\003" + + " \001(\0162,.google.cloud.bigquery.storage.v1.DataFormatB\003\340A\005\022H\n" + + "\013avro_schema\030\004" + + " \001(\0132,.google.cloud.bigquery.storage.v1.AvroSchemaB\003\340A\003H\000\022J\n" + + "\014arrow_schema\030\005" + + " \001(\0132-.google.cloud.bigquery.storage.v1.ArrowSchemaB\003\340A\003H\000\0224\n" + + "\005table\030\006 \001(\tB%\340A\005\372A\037\n" + + "\035bigquery.googleapis.com/Table\022Z\n" + + "\017table_modifiers\030\007 \001(\0132<.google.cloud.big" + + "query.storage.v1.ReadSession.TableModifiersB\003\340A\001\022Y\n" + + "\014read_options\030\010 \001(\0132>.google." + + "cloud.bigquery.storage.v1.ReadSession.TableReadOptionsB\003\340A\001\022B\n" + + "\007streams\030\n" + + " \003(\0132,.google.cloud.bigquery.storage.v1.ReadStreamB\003\340A\003\022*\n" + + "\035estimated_total_bytes_scanned\030\014 \001(\003B\003\340A\003\022/\n" + + "\"estimated_total_physical_file_size\030\017 \001(\003B\003\340A\003\022 \n" + + "\023estimated_row_count\030\016 \001(\003B\003\340A\003\022\025\n" + + "\010trace_id\030\r" + + " \001(\tB\003\340A\001\032C\n" + + "\016TableModifiers\0221\n\r" + + "snapshot_time\030\001 \001(\0132\032.google.protobuf.Timestamp\032\211\005\n" + + "\020TableReadOptions\022\027\n" + + "\017selected_fields\030\001 \003(\t\022\027\n" + + "\017row_restriction\030\002 \001(\t\022g\n" + + "\033arrow_serialization_options\030\003 \001(\0132;.google.cloud.bigquery.s" + + "torage.v1.ArrowSerializationOptionsB\003\340A\001H\000\022e\n" + + "\032avro_serialization_options\030\004 \001(\0132:" + + ".google.cloud.bigquery.storage.v1.AvroSerializationOptionsB\003\340A\001H\000\022#\n" + + "\021sample_percentage\030\005 \001(\001B\003\340A\001H\001\210\001\001\022\205\001\n" + + "\032response_compression_codec\030\006 \001(\0162W.google.cloud.bigqu" + + "ery.storage.v1.ReadSession.TableReadOpti" + + "ons.ResponseCompressionCodecB\003\340A\001H\002\210\001\001\"j\n" + + "\030ResponseCompressionCodec\022*\n" + + "&RESPONSE_COMPRESSION_CODEC_UNSPECIFIED\020\000\022\"\n" + + "\036RESPONSE_COMPRESSION_CODEC_LZ4\020\002B%\n" + + "#output_format_serialization_optionsB\024\n" + + "\022_sample_percentageB\035\n" + + "\033_response_compression_codec:k\352Ah\n" + + "*bigquerystorage.googleapis.com/Read" + + "Session\022:projects/{project}/locations/{location}/sessions/{session}B\010\n" + + "\006schema\"\234\001\n\n" + + "ReadStream\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003:{\352Ax\n" + + ")bigquerystorage.googleapis.com/ReadStream\022" + + "Kprojects/{project}/locations/{location}/sessions/{session}/streams/{stream}\"\373\004\n" + + 
"\013WriteStream\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003\022E\n" + + "\004type\030\002" + + " \001(\01622.google.cloud.bigquery.storage.v1.WriteStream.TypeB\003\340A\005\0224\n" + + "\013create_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013commit_time\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022H\n" + + "\014table_schema\030\005 \001(\0132-.googl" + + "e.cloud.bigquery.storage.v1.TableSchemaB\003\340A\003\022P\n\n" + + "write_mode\030\007 \001(\01627.google.cloud." + + "bigquery.storage.v1.WriteStream.WriteModeB\003\340A\005\022\025\n" + + "\010location\030\010 \001(\tB\003\340A\003\"F\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\r\n" + + "\tCOMMITTED\020\001\022\013\n" + + "\007PENDING\020\002\022\014\n" + + "\010BUFFERED\020\003\"3\n" + + "\tWriteMode\022\032\n" + + "\026WRITE_MODE_UNSPECIFIED\020\000\022\n\n" + + "\006INSERT\020\001:v\352As\n" + + "*bigquerystorage.googleapis.com/WriteStr" + + "eam\022Eprojects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}*>\n\n" + + "DataFormat\022\033\n" + + "\027DATA_FORMAT_UNSPECIFIED\020\000\022\010\n" + + "\004AVRO\020\001\022\t\n" + + "\005ARROW\020\002*I\n" + + "\017WriteStreamView\022!\n" + + "\035WRITE_STREAM_VIEW_UNSPECIFIED\020\000\022\t\n" + + "\005BASIC\020\001\022\010\n" + + "\004FULL\020\002B\273\001\n" + + "$com.google.cloud.bigquery.storage.v1B\013StreamProtoP\001Z>cloud.goo" + + "gle.com/go/bigquery/storage/apiv1/storagepb;storagepb\252\002" + + " Google.Cloud.BigQuery.Storage.V1\312\002 Google\\Cloud\\BigQuery\\Storage" + + "\\V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_ReadSession_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor, + new java.lang.String[] { + "Name", + "ExpireTime", + "DataFormat", + "AvroSchema", + "ArrowSchema", + "Table", + "TableModifiers", + "ReadOptions", + "Streams", + "EstimatedTotalBytesScanned", + "EstimatedTotalPhysicalFileSize", + "EstimatedRowCount", + "TraceId", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor = + internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor, + new java.lang.String[] { + "SnapshotTime", + }); + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_descriptor = + internal_static_google_cloud_bigquery_storage_v1_ReadSession_descriptor + .getNestedTypes() + .get(1); + 
internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableReadOptions_descriptor, + new java.lang.String[] { + "SelectedFields", + "RowRestriction", + "ArrowSerializationOptions", + "AvroSerializationOptions", + "SamplePercentage", + "ResponseCompressionCodec", + "OutputFormatSerializationOptions", + }); + internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_ReadStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_ReadStream_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor, + new java.lang.String[] { + "Name", "Type", "CreateTime", "CommitTime", "TableSchema", "WriteMode", "Location", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1.TableProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java new file mode 100644 index 000000000000..67e8c63674f4 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java @@ -0,0 +1,1447 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Estimated stream statistics for a given read stream.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.StreamStats} + */ +public final class StreamStats extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.StreamStats) + StreamStatsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamStats.newBuilder() to construct. + private StreamStats(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamStats() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamStats(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StreamStats.class, + com.google.cloud.bigquery.storage.v1.StreamStats.Builder.class); + } + + public interface ProgressOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.StreamStats.Progress) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by
+     * the server so far, not including the rows in the current response
+     * message.
+     *
+     * This value, along with `at_response_end`, can be used to interpolate
+     * the progress made as the rows in the message are being processed using
+     * the following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     *
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response is not necessarily equal to the
+     * `at_response_start` value of the current response.
+     * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + double getAtResponseStart(); + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the
+     * rows in the current response.
+     * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + double getAtResponseEnd(); + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1.StreamStats.Progress} */ + public static final class Progress extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.StreamStats.Progress) + ProgressOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Progress.newBuilder() to construct. + private Progress(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Progress() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Progress(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.class, + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder.class); + } + + public static final int AT_RESPONSE_START_FIELD_NUMBER = 1; + private double atResponseStart_ = 0D; + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by
+     * the server so far, not including the rows in the current response
+     * message.
+     *
+     * This value, along with `at_response_end`, can be used to interpolate
+     * the progress made as the rows in the message are being processed using
+     * the following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     *
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response is not necessarily equal to the
+     * `at_response_start` value of the current response.
+     * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + @java.lang.Override + public double getAtResponseStart() { + return atResponseStart_; + } + + public static final int AT_RESPONSE_END_FIELD_NUMBER = 2; + private double atResponseEnd_ = 0D; + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the
+     * rows in the current response.
+     * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + @java.lang.Override + public double getAtResponseEnd() { + return atResponseEnd_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (java.lang.Double.doubleToRawLongBits(atResponseStart_) != 0) { + output.writeDouble(1, atResponseStart_); + } + if (java.lang.Double.doubleToRawLongBits(atResponseEnd_) != 0) { + output.writeDouble(2, atResponseEnd_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (java.lang.Double.doubleToRawLongBits(atResponseStart_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(1, atResponseStart_); + } + if (java.lang.Double.doubleToRawLongBits(atResponseEnd_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, atResponseEnd_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.StreamStats.Progress)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.StreamStats.Progress other = + (com.google.cloud.bigquery.storage.v1.StreamStats.Progress) obj; + + if (java.lang.Double.doubleToLongBits(getAtResponseStart()) + != java.lang.Double.doubleToLongBits(other.getAtResponseStart())) return false; + if (java.lang.Double.doubleToLongBits(getAtResponseEnd()) + != java.lang.Double.doubleToLongBits(other.getAtResponseEnd())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + AT_RESPONSE_START_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getAtResponseStart())); + hash = (37 * hash) + AT_RESPONSE_END_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getAtResponseEnd())); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.StreamStats.Progress prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1.StreamStats.Progress} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.StreamStats.Progress) + com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.class, + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.StreamStats.Progress.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + atResponseStart_ = 0D; + atResponseEnd_ = 0D; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_Progress_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress build() { + com.google.cloud.bigquery.storage.v1.StreamStats.Progress result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress buildPartial() { + com.google.cloud.bigquery.storage.v1.StreamStats.Progress result = + new com.google.cloud.bigquery.storage.v1.StreamStats.Progress(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.StreamStats.Progress result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.atResponseStart_ = atResponseStart_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.atResponseEnd_ = atResponseEnd_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.StreamStats.Progress) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.StreamStats.Progress) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.StreamStats.Progress other) { + if (other == com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance()) + return this; + if (other.getAtResponseStart() != 0D) { + setAtResponseStart(other.getAtResponseStart()); + } + if (other.getAtResponseEnd() != 0D) { + setAtResponseEnd(other.getAtResponseEnd()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 9: + { + atResponseStart_ = input.readDouble(); + bitField0_ |= 0x00000001; + break; + } // case 9 + case 17: + { + atResponseEnd_ = input.readDouble(); + bitField0_ |= 0x00000002; + break; + } // case 17 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private double atResponseStart_; + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response is not necessarily equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + @java.lang.Override + public double getAtResponseStart() { + return atResponseStart_; + } + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response is not necessarily equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @param value The atResponseStart to set. + * @return This builder for chaining. + */ + public Builder setAtResponseStart(double value) { + + atResponseStart_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response is not necessarily equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseStart() { + bitField0_ = (bitField0_ & ~0x00000001); + atResponseStart_ = 0D; + onChanged(); + return this; + } + + private double atResponseEnd_; + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + @java.lang.Override + public double getAtResponseEnd() { + return atResponseEnd_; + } + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @param value The atResponseEnd to set. + * @return This builder for chaining. + */ + public Builder setAtResponseEnd(double value) { + + atResponseEnd_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseEnd() { + bitField0_ = (bitField0_ & ~0x00000002); + atResponseEnd_ = 0D; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.StreamStats.Progress) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.StreamStats.Progress) + private static final com.google.cloud.bigquery.storage.v1.StreamStats.Progress DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.StreamStats.Progress(); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats.Progress getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Progress parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1.StreamStats.Progress progress_; + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress getProgress() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance() + : progress_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getProgress()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.StreamStats)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.StreamStats other = + (com.google.cloud.bigquery.storage.v1.StreamStats) obj; + + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.StreamStats prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Estimated stream statistics for a given read stream.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.StreamStats} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.StreamStats) + com.google.cloud.bigquery.storage.v1.StreamStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.StreamStats.class, + com.google.cloud.bigquery.storage.v1.StreamStats.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.StreamStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getProgressFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_StreamStats_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats build() { + com.google.cloud.bigquery.storage.v1.StreamStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats buildPartial() { + com.google.cloud.bigquery.storage.v1.StreamStats result = + new com.google.cloud.bigquery.storage.v1.StreamStats(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.StreamStats result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.progress_ = progressBuilder_ == null ? 
progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.StreamStats) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.StreamStats) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.StreamStats other) { + if (other == com.google.cloud.bigquery.storage.v1.StreamStats.getDefaultInstance()) + return this; + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage(getProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1.StreamStats.Progress progress_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + public Builder setProgress(com.google.cloud.bigquery.storage.v1.StreamStats.Progress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + public Builder setProgress( + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + public Builder mergeProgress(com.google.cloud.bigquery.storage.v1.StreamStats.Progress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && progress_ != null + && progress_ + != com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000001); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder getProgressBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder + getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1.StreamStats.Progress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder> + getProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.StreamStats) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.StreamStats) + private static final com.google.cloud.bigquery.storage.v1.StreamStats DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.StreamStats(); + } + + public static com.google.cloud.bigquery.storage.v1.StreamStats getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.StreamStats getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java new file mode 100644 index 000000000000..64bc8745ad58 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface StreamStatsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.StreamStats) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + com.google.cloud.bigquery.storage.v1.StreamStats.Progress getProgress(); + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.StreamStats.Progress progress = 2; + */ + com.google.cloud.bigquery.storage.v1.StreamStats.ProgressOrBuilder getProgressOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java new file mode 100644 index 000000000000..3f303282c4a6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java @@ -0,0 +1,4480 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * TableFieldSchema defines a single field/column within a table schema.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableFieldSchema} + */ +public final class TableFieldSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.TableFieldSchema) + TableFieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableFieldSchema.newBuilder() to construct. + private TableFieldSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableFieldSchema() { + name_ = ""; + type_ = 0; + mode_ = 0; + fields_ = java.util.Collections.emptyList(); + description_ = ""; + defaultValueExpression_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableFieldSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.class, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder.class); + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1.TableFieldSchema.Type} */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Illegal value
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * 64K, UTF8
+     * 
+ * + * STRING = 1; + */ + STRING(1), + /** + * + * + *
+     * 64-bit signed
+     * 
+ * + * INT64 = 2; + */ + INT64(2), + /** + * + * + *
+     * 64-bit IEEE floating point
+     * 
+ * + * DOUBLE = 3; + */ + DOUBLE(3), + /** + * + * + *
+     * Aggregate type
+     * 
+ * + * STRUCT = 4; + */ + STRUCT(4), + /** + * + * + *
+     * 64K, Binary
+     * 
+ * + * BYTES = 5; + */ + BYTES(5), + /** + * + * + *
+     * 2-valued
+     * 
+ * + * BOOL = 6; + */ + BOOL(6), + /** + * + * + *
+     * 64-bit signed usec since UTC epoch
+     * 
+ * + * TIMESTAMP = 7; + */ + TIMESTAMP(7), + /** + * + * + *
+     * Civil date - Year, Month, Day
+     * 
+ * + * DATE = 8; + */ + DATE(8), + /** + * + * + *
+     * Civil time - Hour, Minute, Second, Microseconds
+     * 
+ * + * TIME = 9; + */ + TIME(9), + /** + * + * + *
+     * Combination of civil date and civil time
+     * 
+ * + * DATETIME = 10; + */ + DATETIME(10), + /** + * + * + *
+     * Geography object
+     * 
+ * + * GEOGRAPHY = 11; + */ + GEOGRAPHY(11), + /** + * + * + *
+     * Numeric value
+     * 
+ * + * NUMERIC = 12; + */ + NUMERIC(12), + /** + * + * + *
+     * BigNumeric value
+     * 
+ * + * BIGNUMERIC = 13; + */ + BIGNUMERIC(13), + /** + * + * + *
+     * Interval
+     * 
+ * + * INTERVAL = 14; + */ + INTERVAL(14), + /** + * + * + *
+     * JSON, String
+     * 
+ * + * JSON = 15; + */ + JSON(15), + /** + * + * + *
+     * RANGE
+     * 
+ * + * RANGE = 16; + */ + RANGE(16), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Illegal value
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * 64K, UTF8
+     * 
+ * + * STRING = 1; + */ + public static final int STRING_VALUE = 1; + + /** + * + * + *
+     * 64-bit signed
+     * 
+ * + * INT64 = 2; + */ + public static final int INT64_VALUE = 2; + + /** + * + * + *
+     * 64-bit IEEE floating point
+     * 
+ * + * DOUBLE = 3; + */ + public static final int DOUBLE_VALUE = 3; + + /** + * + * + *
+     * Aggregate type
+     * 
+ * + * STRUCT = 4; + */ + public static final int STRUCT_VALUE = 4; + + /** + * + * + *
+     * 64K, Binary
+     * 
+ * + * BYTES = 5; + */ + public static final int BYTES_VALUE = 5; + + /** + * + * + *
+     * 2-valued
+     * 
+ * + * BOOL = 6; + */ + public static final int BOOL_VALUE = 6; + + /** + * + * + *
+     * 64-bit signed usec since UTC epoch
+     * 
+ * + * TIMESTAMP = 7; + */ + public static final int TIMESTAMP_VALUE = 7; + + /** + * + * + *
+     * Civil date - Year, Month, Day
+     * 
+ * + * DATE = 8; + */ + public static final int DATE_VALUE = 8; + + /** + * + * + *
+     * Civil time - Hour, Minute, Second, Microseconds
+     * 
+ * + * TIME = 9; + */ + public static final int TIME_VALUE = 9; + + /** + * + * + *
+     * Combination of civil date and civil time
+     * 
+ * + * DATETIME = 10; + */ + public static final int DATETIME_VALUE = 10; + + /** + * + * + *
+     * Geography object
+     * 
+ * + * GEOGRAPHY = 11; + */ + public static final int GEOGRAPHY_VALUE = 11; + + /** + * + * + *
+     * Numeric value
+     * 
+ * + * NUMERIC = 12; + */ + public static final int NUMERIC_VALUE = 12; + + /** + * + * + *
+     * BigNumeric value
+     * 
+ * + * BIGNUMERIC = 13; + */ + public static final int BIGNUMERIC_VALUE = 13; + + /** + * + * + *
+     * Interval
+     * 
+ * + * INTERVAL = 14; + */ + public static final int INTERVAL_VALUE = 14; + + /** + * + * + *
+     * JSON, String
+     * 
+ * + * JSON = 15; + */ + public static final int JSON_VALUE = 15; + + /** + * + * + *
+     * RANGE
+     * 
+ * + * RANGE = 16; + */ + public static final int RANGE_VALUE = 16; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return STRING; + case 2: + return INT64; + case 3: + return DOUBLE; + case 4: + return STRUCT; + case 5: + return BYTES; + case 6: + return BOOL; + case 7: + return TIMESTAMP; + case 8: + return DATE; + case 9: + return TIME; + case 10: + return DATETIME; + case 11: + return GEOGRAPHY; + case 12: + return NUMERIC; + case 13: + return BIGNUMERIC; + case 14: + return INTERVAL; + case 15: + return JSON; + case 16: + return RANGE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.TableFieldSchema.Type) + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1.TableFieldSchema.Mode} */ + public enum Mode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Illegal value
+     * 
+ * + * MODE_UNSPECIFIED = 0; + */ + MODE_UNSPECIFIED(0), + /** NULLABLE = 1; */ + NULLABLE(1), + /** REQUIRED = 2; */ + REQUIRED(2), + /** REPEATED = 3; */ + REPEATED(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Illegal value
+     * 
+ * + * MODE_UNSPECIFIED = 0; + */ + public static final int MODE_UNSPECIFIED_VALUE = 0; + + /** NULLABLE = 1; */ + public static final int NULLABLE_VALUE = 1; + + /** REQUIRED = 2; */ + public static final int REQUIRED_VALUE = 2; + + /** REPEATED = 3; */ + public static final int REPEATED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Mode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Mode forNumber(int value) { + switch (value) { + case 0: + return MODE_UNSPECIFIED; + case 1: + return NULLABLE; + case 2: + return REQUIRED; + case 3: + return REPEATED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Mode findValueByNumber(int number) { + return Mode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final Mode[] VALUES = values(); + + public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Mode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.TableFieldSchema.Mode) + } + + public interface FieldElementTypeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. The type of a field element.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
+     * Required. The type of a field element.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType(); + } + + /** + * + * + *
+   * Represents the type of a field element.
+   * 
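A minimal construction sketch for this message, using the newBuilder, setType, and build members generated below:

    TableFieldSchema.FieldElementType dateElement =
        TableFieldSchema.FieldElementType.newBuilder()
            .setType(TableFieldSchema.Type.DATE)
            .build();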
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType} + */ + public static final class FieldElementType extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) + FieldElementTypeOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FieldElementType.newBuilder() to construct. + private FieldElementType(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FieldElementType() { + type_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FieldElementType(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.class, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder.class); + } + + public static final int TYPE_FIELD_NUMBER = 1; + private int type_ = 0; + + /** + * + * + *
+     * Required. The type of a field element.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+     * Required. The type of a field element.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (type_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, type_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (type_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, type_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType other = + (com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) obj; + + if (type_ != other.type_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Represents the type of a field element.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.class, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + type_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType build() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType buildPartial() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType result = + new com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.type_ = type_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType other) { + if (other + == com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance()) return this; + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int type_ = 0; + + /** + * + * + *
+       * Required. The type of a field element.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+       * Required. The type of a field element.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The type of a field element.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * Required. The type of a field element.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. The type of a field element.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType) + private static final com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType(); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FieldElementType parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
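To make the naming rule concrete, a sketch of a valid field definition; setName appears in the Builder below, while setMode is assumed from the usual generated pattern (this excerpt shows only setModeValue directly):

    // "event_id" is valid: letters, digits, and underscores only,
    // starting with a letter, well under the 128-character limit.
    TableFieldSchema field =
        TableFieldSchema.newBuilder()
            .setName("event_id")
            .setType(TableFieldSchema.Type.STRING)
            .setMode(TableFieldSchema.Mode.REQUIRED)
            .build();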
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_ = 0; + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + public static final int MODE_FIELD_NUMBER = 3; + private int mode_ = 0; + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode getMode() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode result = + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.forNumber(mode_); + return result == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.UNRECOGNIZED + : result; + } + + public static final int FIELDS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List fields_; + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
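A sketch of the nesting this field enables; addFields is assumed to be the standard generated adder for the repeated fields member (it is not shown verbatim in this excerpt):

    // A STRUCT column whose children are themselves TableFieldSchema values.
    TableFieldSchema address =
        TableFieldSchema.newBuilder()
            .setName("address")
            .setType(TableFieldSchema.Type.STRUCT)
            .addFields(
                TableFieldSchema.newBuilder()
                    .setName("city")
                    .setType(TableFieldSchema.Type.STRING))
            .build();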
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getFieldsList() { + return fields_; + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + public static final int DESCRIPTION_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MAX_LENGTH_FIELD_NUMBER = 7; + private long maxLength_ = 0L; + + /** + * + * + *
+   * Optional. Maximum length of values of this field for STRINGS or BYTES.
+   *
+   * If max_length is not specified, no maximum length constraint is imposed
+   * on this field.
+   *
+   * If type = "STRING", then max_length represents the maximum UTF-8
+   * length of strings in this field.
+   *
+   * If type = "BYTES", then max_length represents the maximum number of
+   * bytes in this field.
+   *
+   * It is invalid to set this field if type is not "STRING" or "BYTES".
+   * 
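For example, capping a STRING column at ten characters with setMaxLength (the setter invoked by mergeFrom further below):

    // For STRING, max_length counts UTF-8 characters; for BYTES, bytes.
    TableFieldSchema countryCode =
        TableFieldSchema.newBuilder()
            .setName("country_code")
            .setType(TableFieldSchema.Type.STRING)
            .setMaxLength(10)
            .build();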
+ * + * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxLength. + */ + @java.lang.Override + public long getMaxLength() { + return maxLength_; + } + + public static final int PRECISION_FIELD_NUMBER = 8; + private long precision_ = 0L; + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) and scale
+   * (maximum number of digits in the fractional part in base 10) constraints
+   * for values of this field for NUMERIC or BIGNUMERIC.
+   *
+   * It is invalid to set precision or scale if type is not "NUMERIC" or
+   * "BIGNUMERIC".
+   *
+   * If precision and scale are not specified, no value range constraint is
+   * imposed on this field insofar as values are permitted by the type.
+   *
+   * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+   *
+   * * Precision (P) and scale (S) are specified:
+   *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+   * * Precision (P) is specified but not scale (and thus scale is
+   *   interpreted to be equal to zero):
+   *   [-10^P + 1, 10^P - 1].
+   *
+   * Acceptable values for precision and scale if both are specified:
+   *
+   * * If type = "NUMERIC":
+   *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+   * * If type = "BIGNUMERIC":
+   *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+   *
+   * Acceptable values for precision if only precision is specified but not
+   * scale (and thus scale is interpreted to be equal to zero):
+   *
+   * * If type = "NUMERIC": 1 <= precision <= 29.
+   * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+   *
+   * If scale is specified but not precision, then it is invalid.
+   * 
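As a worked instance of the range rule above: precision 10 with scale 2 admits values in [-10^8 + 10^-2, 10^8 - 10^-2], that is, -99999999.99 through 99999999.99. A sketch using setPrecision and setScale (the setters mergeFrom below relies on):

    // NUMERIC(10, 2): satisfies 1 <= precision - scale <= 29 and 0 <= scale <= 9.
    TableFieldSchema amount =
        TableFieldSchema.newBuilder()
            .setName("amount")
            .setType(TableFieldSchema.Type.NUMERIC)
            .setPrecision(10)
            .setScale(2)
            .build();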
+ * + * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The precision. + */ + @java.lang.Override + public long getPrecision() { + return precision_; + } + + public static final int SCALE_FIELD_NUMBER = 9; + private long scale_ = 0L; + + /** + * + * + *
+   * Optional. See documentation for precision.
+   * 
+ * + * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The scale. + */ + @java.lang.Override + public long getScale() { + return scale_; + } + + public static final int DEFAULT_VALUE_EXPRESSION_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private volatile java.lang.Object defaultValueExpression_ = ""; + + /** + * + * + *
+   * Optional. A SQL expression to specify the [default value]
+   * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+   * 
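For instance, defaulting a timestamp column to the insertion time; setDefaultValueExpression is assumed from the usual generated pattern for string fields (it is not shown verbatim in this excerpt):

    TableFieldSchema createdAt =
        TableFieldSchema.newBuilder()
            .setName("created_at")
            .setType(TableFieldSchema.Type.TIMESTAMP)
            .setDefaultValueExpression("CURRENT_TIMESTAMP()")
            .build();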
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The defaultValueExpression. + */ + @java.lang.Override + public java.lang.String getDefaultValueExpression() { + java.lang.Object ref = defaultValueExpression_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + defaultValueExpression_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A SQL expression to specify the [default value]
+   * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+   * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for defaultValueExpression. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDefaultValueExpressionBytes() { + java.lang.Object ref = defaultValueExpression_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + defaultValueExpression_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TIMESTAMP_PRECISION_FIELD_NUMBER = 27; + private com.google.protobuf.Int64Value timestampPrecision_; + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) for seconds
+   * of TIMESTAMP type.
+   *
+   * Possible values include:
+   *
+   * * 6 (Default, for TIMESTAMP type with microsecond precision)
+   * * 12 (For TIMESTAMP type with picosecond precision)
+   * 
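Because the field is an Int64Value wrapper (so an unset value is distinguishable from 0), a sketch wraps the number; setTimestampPrecision is assumed as the counterpart of the mergeTimestampPrecision shown further below:

    // Picosecond-precision TIMESTAMP; 6 (microseconds) is the default.
    TableFieldSchema capturedAt =
        TableFieldSchema.newBuilder()
            .setName("captured_at")
            .setType(TableFieldSchema.Type.TIMESTAMP)
            .setTimestampPrecision(com.google.protobuf.Int64Value.of(12))
            .build();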
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the timestampPrecision field is set. + */ + @java.lang.Override + public boolean hasTimestampPrecision() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) for seconds
+   * of TIMESTAMP type.
+   *
+   * Possible values include:
+   *
+   * * 6 (Default, for TIMESTAMP type with microsecond precision)
+   * * 12 (For TIMESTAMP type with picosecond precision)
+   * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The timestampPrecision. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getTimestampPrecision() { + return timestampPrecision_ == null + ? com.google.protobuf.Int64Value.getDefaultInstance() + : timestampPrecision_; + } + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) for seconds
+   * of TIMESTAMP type.
+   *
+   * Possible values include:
+   *
+   * * 6 (Default, for TIMESTAMP type with microsecond precision)
+   * * 12 (For TIMESTAMP type with picosecond precision)
+   * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getTimestampPrecisionOrBuilder() { + return timestampPrecision_ == null + ? com.google.protobuf.Int64Value.getDefaultInstance() + : timestampPrecision_; + } + + public static final int RANGE_ELEMENT_TYPE_FIELD_NUMBER = 11; + private com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType rangeElementType_; + + /** + * + * + *
+   * Optional. The subtype of the RANGE, if the type of this field is RANGE;
+   * in that case this field is required. Possible values for the field
+   * element type of a RANGE include:
+   * * DATE
+   * * DATETIME
+   * * TIMESTAMP
+   * 
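Putting the RANGE pieces together, a sketch of a DATE range column; setRangeElementType is assumed as the counterpart of the mergeRangeElementType shown further below:

    TableFieldSchema validity =
        TableFieldSchema.newBuilder()
            .setName("validity")
            .setType(TableFieldSchema.Type.RANGE)
            .setRangeElementType(
                TableFieldSchema.FieldElementType.newBuilder()
                    .setType(TableFieldSchema.Type.DATE))
            .build();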
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the rangeElementType field is set. + */ + @java.lang.Override + public boolean hasRangeElementType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The subtype of the RANGE, if the type of this field is RANGE;
+   * in that case this field is required. Possible values for the field
+   * element type of a RANGE include:
+   * * DATE
+   * * DATETIME
+   * * TIMESTAMP
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The rangeElementType. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + getRangeElementType() { + return rangeElementType_ == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance() + : rangeElementType_; + } + + /** + * + * + *
+   * Optional. The subtype of the RANGE, if the type of this field is RANGE;
+   * in that case this field is required. Possible values for the field
+   * element type of a RANGE include:
+   * * DATE
+   * * DATETIME
+   * * TIMESTAMP
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder + getRangeElementTypeOrBuilder() { + return rangeElementType_ == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance() + : rangeElementType_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + if (mode_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.MODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, mode_); + } + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(4, fields_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, description_); + } + if (maxLength_ != 0L) { + output.writeInt64(7, maxLength_); + } + if (precision_ != 0L) { + output.writeInt64(8, precision_); + } + if (scale_ != 0L) { + output.writeInt64(9, scale_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(defaultValueExpression_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 10, defaultValueExpression_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(11, getRangeElementType()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(27, getTimestampPrecision()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (mode_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.MODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, mode_); + } + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, fields_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, description_); + } + if (maxLength_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, maxLength_); + } + if (precision_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, precision_); + } + if (scale_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, scale_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(defaultValueExpression_)) { + size += 
com.google.protobuf.GeneratedMessageV3.computeStringSize(10, defaultValueExpression_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getRangeElementType()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(27, getTimestampPrecision()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.TableFieldSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.TableFieldSchema other = + (com.google.cloud.bigquery.storage.v1.TableFieldSchema) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) return false; + if (mode_ != other.mode_) return false; + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (getMaxLength() != other.getMaxLength()) return false; + if (getPrecision() != other.getPrecision()) return false; + if (getScale() != other.getScale()) return false; + if (!getDefaultValueExpression().equals(other.getDefaultValueExpression())) return false; + if (hasTimestampPrecision() != other.hasTimestampPrecision()) return false; + if (hasTimestampPrecision()) { + if (!getTimestampPrecision().equals(other.getTimestampPrecision())) return false; + } + if (hasRangeElementType() != other.hasRangeElementType()) return false; + if (hasRangeElementType()) { + if (!getRangeElementType().equals(other.getRangeElementType())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + mode_; + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + hash = (37 * hash) + MAX_LENGTH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxLength()); + hash = (37 * hash) + PRECISION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPrecision()); + hash = (37 * hash) + SCALE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getScale()); + hash = (37 * hash) + DEFAULT_VALUE_EXPRESSION_FIELD_NUMBER; + hash = (53 * hash) + getDefaultValueExpression().hashCode(); + if (hasTimestampPrecision()) { + hash = (37 * hash) + TIMESTAMP_PRECISION_FIELD_NUMBER; + hash = (53 * hash) + getTimestampPrecision().hashCode(); + } + if (hasRangeElementType()) { + hash = (37 * hash) + RANGE_ELEMENT_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getRangeElementType().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1.TableFieldSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * TableFieldSchema defines a single field/column within a table schema.
+   * 
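Given the parseFrom overloads above and the Builder below, a serialization round trip is direct; toByteArray comes from the protobuf base classes rather than this excerpt, and parseFrom(byte[]) throws the checked InvalidProtocolBufferException:

    TableFieldSchema field =
        TableFieldSchema.newBuilder()
            .setName("id")
            .setType(TableFieldSchema.Type.INT64)
            .build();
    byte[] bytes = field.toByteArray();
    TableFieldSchema parsed = TableFieldSchema.parseFrom(bytes);
    // parsed.equals(field) holds, per the generated equals() above.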
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableFieldSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.TableFieldSchema) + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.class, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.TableFieldSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getFieldsFieldBuilder(); + getTimestampPrecisionFieldBuilder(); + getRangeElementTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = 0; + mode_ = 0; + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + description_ = ""; + maxLength_ = 0L; + precision_ = 0L; + scale_ = 0L; + defaultValueExpression_ = ""; + timestampPrecision_ = null; + if (timestampPrecisionBuilder_ != null) { + timestampPrecisionBuilder_.dispose(); + timestampPrecisionBuilder_ = null; + } + rangeElementType_ = null; + if (rangeElementTypeBuilder_ != null) { + rangeElementTypeBuilder_.dispose(); + rangeElementTypeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema build() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema result = + new com.google.cloud.bigquery.storage.v1.TableFieldSchema(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1.TableFieldSchema result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 
0x00000008) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.TableFieldSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.mode_ = mode_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.description_ = description_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.maxLength_ = maxLength_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.precision_ = precision_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.scale_ = scale_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.defaultValueExpression_ = defaultValueExpression_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000200) != 0)) { + result.timestampPrecision_ = + timestampPrecisionBuilder_ == null + ? timestampPrecision_ + : timestampPrecisionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.rangeElementType_ = + rangeElementTypeBuilder_ == null ? rangeElementType_ : rangeElementTypeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.TableFieldSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.TableFieldSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.TableFieldSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.mode_ != 0) { + setModeValue(other.getModeValue()); + } + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + 
fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000008); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.getMaxLength() != 0L) { + setMaxLength(other.getMaxLength()); + } + if (other.getPrecision() != 0L) { + setPrecision(other.getPrecision()); + } + if (other.getScale() != 0L) { + setScale(other.getScale()); + } + if (!other.getDefaultValueExpression().isEmpty()) { + defaultValueExpression_ = other.defaultValueExpression_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (other.hasTimestampPrecision()) { + mergeTimestampPrecision(other.getTimestampPrecision()); + } + if (other.hasRangeElementType()) { + mergeRangeElementType(other.getRangeElementType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + mode_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + com.google.cloud.bigquery.storage.v1.TableFieldSchema m = + input.readMessage( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 34 + case 50: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 50 + case 56: + { + maxLength_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 56 + case 64: + { + precision_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 64 + case 72: + { + scale_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 72 + case 82: + { + defaultValueExpression_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 82 + case 90: + { + input.readMessage( + getRangeElementTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000400; + break; + } // case 90 + case 218: + { + input.readMessage( + getTimestampPrecisionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 218 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int type_ = 0; + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + private int mode_ = 0; + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for mode to set. + * @return This builder for chaining. + */ + public Builder setModeValue(int value) { + mode_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode getMode() { + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode result = + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.forNumber(mode_); + return result == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The mode to set. + * @return This builder for chaining. + */ + public Builder setMode(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + mode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMode() { + bitField0_ = (bitField0_ & ~0x00000004); + mode_ = 0; + onChanged(); + return this; + } + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + fields_ = + new java.util.ArrayList(fields_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
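+     *
+     * Illustrative sketch of a STRUCT field with one nested sub-field, built with
+     * the addFields accessors below (the field names are assumptions):
+     * <pre>{@code
+     * TableFieldSchema address =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("address")
+     *         .setType(TableFieldSchema.Type.STRUCT)
+     *         .addFields(
+     *             TableFieldSchema.newBuilder()
+     *                 .setName("city")
+     *                 .setType(TableFieldSchema.Type.STRING))
+     *         .build();
+     * }</pre>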
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllFields( + java.lang.Iterable + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to
+     * STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + private java.lang.Object description_ = ""; + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private long maxLength_; + + /** + * + * + *
+     * Optional. Maximum length of values of this field for "STRING" or "BYTES" types.
+     *
+     * If max_length is not specified, no maximum length constraint is imposed
+     * on this field.
+     *
+     * If type = "STRING", then max_length represents the maximum UTF-8
+     * length of strings in this field.
+     *
+     * If type = "BYTES", then max_length represents the maximum number of
+     * bytes in this field.
+     *
+     * It is invalid to set this field if type is not "STRING" or "BYTES".
+     * 
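+     *
+     * For example, a sketch of a STRING field capped at 256 UTF-8 characters (the
+     * name and limit are illustrative):
+     * <pre>{@code
+     * TableFieldSchema title =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("title")
+     *         .setType(TableFieldSchema.Type.STRING)
+     *         .setMaxLength(256L)
+     *         .build();
+     * }</pre>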
+ * + * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxLength. + */ + @java.lang.Override + public long getMaxLength() { + return maxLength_; + } + + /** + * + * + *
+     * Optional. Maximum length of values of this field for "STRING" or "BYTES" types.
+     *
+     * If max_length is not specified, no maximum length constraint is imposed
+     * on this field.
+     *
+     * If type = "STRING", then max_length represents the maximum UTF-8
+     * length of strings in this field.
+     *
+     * If type = "BYTES", then max_length represents the maximum number of
+     * bytes in this field.
+     *
+     * It is invalid to set this field if type is not "STRING" or "BYTES".
+     * 
+ * + * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The maxLength to set. + * @return This builder for chaining. + */ + public Builder setMaxLength(long value) { + + maxLength_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Maximum length of values of this field for "STRING" or "BYTES" types.
+     *
+     * If max_length is not specified, no maximum length constraint is imposed
+     * on this field.
+     *
+     * If type = "STRING", then max_length represents the maximum UTF-8
+     * length of strings in this field.
+     *
+     * If type = "BYTES", then max_length represents the maximum number of
+     * bytes in this field.
+     *
+     * It is invalid to set this field if type is not "STRING" or "BYTES".
+     * 
+ * + * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMaxLength() { + bitField0_ = (bitField0_ & ~0x00000020); + maxLength_ = 0L; + onChanged(); + return this; + } + + private long precision_; + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) and scale
+     * (maximum number of digits in the fractional part in base 10) constraints
+     * for values of this field for NUMERIC or BIGNUMERIC.
+     *
+     * It is invalid to set precision or scale if type is not "NUMERIC" or
+     * "BIGNUMERIC".
+     *
+     * If precision and scale are not specified, no value range constraint is
+     * imposed on this field insofar as values are permitted by the type.
+     *
+     * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+     *
+     * * Precision (P) and scale (S) are specified:
+     *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+     * * Precision (P) is specified but not scale (and thus scale is
+     *   interpreted to be equal to zero):
+     *   [-10^P + 1, 10^P - 1].
+     *
+     * Acceptable values for precision and scale if both are specified:
+     *
+     * * If type = "NUMERIC":
+     *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+     * * If type = "BIGNUMERIC":
+     *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+     *
+     * Acceptable values for precision if only precision is specified but not
+     * scale (and thus scale is interpreted to be equal to zero):
+     *
+     * * If type = "NUMERIC": 1 <= precision <= 29.
+     * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+     *
+     * If scale is specified but not precision, then it is invalid.
+     * 
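+     *
+     * Worked example (a sketch; names and values are illustrative): precision 10
+     * and scale 2 on a NUMERIC field permit values in
+     * [-10^8 + 10^-2, 10^8 - 10^-2], i.e. [-99999999.99, 99999999.99]:
+     * <pre>{@code
+     * TableFieldSchema price =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("price")
+     *         .setType(TableFieldSchema.Type.NUMERIC)
+     *         .setPrecision(10L)
+     *         .setScale(2L)
+     *         .build();
+     * }</pre>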
+ * + * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The precision. + */ + @java.lang.Override + public long getPrecision() { + return precision_; + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) and scale
+     * (maximum number of digits in the fractional part in base 10) constraints
+     * for values of this field for NUMERIC or BIGNUMERIC.
+     *
+     * It is invalid to set precision or scale if type is not "NUMERIC" or
+     * "BIGNUMERIC".
+     *
+     * If precision and scale are not specified, no value range constraint is
+     * imposed on this field insofar as values are permitted by the type.
+     *
+     * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+     *
+     * * Precision (P) and scale (S) are specified:
+     *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+     * * Precision (P) is specified but not scale (and thus scale is
+     *   interpreted to be equal to zero):
+     *   [-10^P + 1, 10^P - 1].
+     *
+     * Acceptable values for precision and scale if both are specified:
+     *
+     * * If type = "NUMERIC":
+     *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+     * * If type = "BIGNUMERIC":
+     *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+     *
+     * Acceptable values for precision if only precision is specified but not
+     * scale (and thus scale is interpreted to be equal to zero):
+     *
+     * * If type = "NUMERIC": 1 <= precision <= 29.
+     * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+     *
+     * If scale is specified but not precision, then it is invalid.
+     * 
+ * + * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The precision to set. + * @return This builder for chaining. + */ + public Builder setPrecision(long value) { + + precision_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) and scale
+     * (maximum number of digits in the fractional part in base 10) constraints
+     * for values of this field for NUMERIC or BIGNUMERIC.
+     *
+     * It is invalid to set precision or scale if type is not "NUMERIC" or
+     * "BIGNUMERIC".
+     *
+     * If precision and scale are not specified, no value range constraint is
+     * imposed on this field insofar as values are permitted by the type.
+     *
+     * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+     *
+     * * Precision (P) and scale (S) are specified:
+     *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+     * * Precision (P) is specified but not scale (and thus scale is
+     *   interpreted to be equal to zero):
+     *   [-10^P + 1, 10^P - 1].
+     *
+     * Acceptable values for precision and scale if both are specified:
+     *
+     * * If type = "NUMERIC":
+     *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+     * * If type = "BIGNUMERIC":
+     *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+     *
+     * Acceptable values for precision if only precision is specified but not
+     * scale (and thus scale is interpreted to be equal to zero):
+     *
+     * * If type = "NUMERIC": 1 <= precision <= 29.
+     * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+     *
+     * If scale is specified but not precision, then it is invalid.
+     * 
+ * + * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPrecision() { + bitField0_ = (bitField0_ & ~0x00000040); + precision_ = 0L; + onChanged(); + return this; + } + + private long scale_; + + /** + * + * + *
+     * Optional. See documentation for precision.
+     * 
+ * + * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The scale. + */ + @java.lang.Override + public long getScale() { + return scale_; + } + + /** + * + * + *
+     * Optional. See documentation for precision.
+     * 
+ * + * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The scale to set. + * @return This builder for chaining. + */ + public Builder setScale(long value) { + + scale_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. See documentation for precision.
+     * 
+ * + * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearScale() { + bitField0_ = (bitField0_ & ~0x00000080); + scale_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object defaultValueExpression_ = ""; + + /** + * + * + *
+     * Optional. A SQL expression to specify the [default value]
+     * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+     * 
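+     *
+     * Illustrative sketch (the expression shown is one example of a valid default
+     * value expression):
+     * <pre>{@code
+     * TableFieldSchema createdAt =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("created_at")
+     *         .setType(TableFieldSchema.Type.TIMESTAMP)
+     *         .setDefaultValueExpression("CURRENT_TIMESTAMP()")
+     *         .build();
+     * }</pre>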
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The defaultValueExpression. + */ + public java.lang.String getDefaultValueExpression() { + java.lang.Object ref = defaultValueExpression_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + defaultValueExpression_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A SQL expression to specify the [default value]
+     * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+     * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for defaultValueExpression. + */ + public com.google.protobuf.ByteString getDefaultValueExpressionBytes() { + java.lang.Object ref = defaultValueExpression_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + defaultValueExpression_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A SQL expression to specify the [default value]
+     * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+     * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The defaultValueExpression to set. + * @return This builder for chaining. + */ + public Builder setDefaultValueExpression(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + defaultValueExpression_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A SQL expression to specify the [default value]
+     * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+     * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDefaultValueExpression() { + defaultValueExpression_ = getDefaultInstance().getDefaultValueExpression(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A SQL expression to specify the [default value]
+     * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+     * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for defaultValueExpression to set. + * @return This builder for chaining. + */ + public Builder setDefaultValueExpressionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + defaultValueExpression_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value timestampPrecision_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + timestampPrecisionBuilder_; + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
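+     *
+     * Sketch of requesting picosecond precision through the Int64Value wrapper
+     * (the field name is illustrative):
+     * <pre>{@code
+     * TableFieldSchema eventTime =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("event_time")
+     *         .setType(TableFieldSchema.Type.TIMESTAMP)
+     *         .setTimestampPrecision(com.google.protobuf.Int64Value.of(12L))
+     *         .build();
+     * }</pre>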
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the timestampPrecision field is set. + */ + public boolean hasTimestampPrecision() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The timestampPrecision. + */ + public com.google.protobuf.Int64Value getTimestampPrecision() { + if (timestampPrecisionBuilder_ == null) { + return timestampPrecision_ == null + ? com.google.protobuf.Int64Value.getDefaultInstance() + : timestampPrecision_; + } else { + return timestampPrecisionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTimestampPrecision(com.google.protobuf.Int64Value value) { + if (timestampPrecisionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampPrecision_ = value; + } else { + timestampPrecisionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTimestampPrecision(com.google.protobuf.Int64Value.Builder builderForValue) { + if (timestampPrecisionBuilder_ == null) { + timestampPrecision_ = builderForValue.build(); + } else { + timestampPrecisionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeTimestampPrecision(com.google.protobuf.Int64Value value) { + if (timestampPrecisionBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && timestampPrecision_ != null + && timestampPrecision_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getTimestampPrecisionBuilder().mergeFrom(value); + } else { + timestampPrecision_ = value; + } + } else { + timestampPrecisionBuilder_.mergeFrom(value); + } + if (timestampPrecision_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearTimestampPrecision() { + bitField0_ = (bitField0_ & ~0x00000200); + timestampPrecision_ = null; + if (timestampPrecisionBuilder_ != null) { + timestampPrecisionBuilder_.dispose(); + timestampPrecisionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Int64Value.Builder getTimestampPrecisionBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getTimestampPrecisionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Int64ValueOrBuilder getTimestampPrecisionOrBuilder() { + if (timestampPrecisionBuilder_ != null) { + return timestampPrecisionBuilder_.getMessageOrBuilder(); + } else { + return timestampPrecision_ == null + ? com.google.protobuf.Int64Value.getDefaultInstance() + : timestampPrecision_; + } + } + + /** + * + * + *
+     * Optional. Precision (maximum number of total digits in base 10) for seconds
+     * of TIMESTAMP type.
+     *
+     * Possible values include:
+     *
+     * * 6 (Default, for TIMESTAMP type with microsecond precision)
+     * * 12 (For TIMESTAMP type with picosecond precision)
+     * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getTimestampPrecisionFieldBuilder() { + if (timestampPrecisionBuilder_ == null) { + timestampPrecisionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getTimestampPrecision(), getParentForChildren(), isClean()); + timestampPrecision_ = null; + } + return timestampPrecisionBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + rangeElementType_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder> + rangeElementTypeBuilder_; + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
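+     *
+     * Sketch of a RANGE field over DATE (this assumes FieldElementType exposes a
+     * setType accessor mirroring the Type enum; names are illustrative):
+     * <pre>{@code
+     * TableFieldSchema validity =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("validity")
+     *         .setType(TableFieldSchema.Type.RANGE)
+     *         .setRangeElementType(
+     *             TableFieldSchema.FieldElementType.newBuilder()
+     *                 .setType(TableFieldSchema.Type.DATE))
+     *         .build();
+     * }</pre>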
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the rangeElementType field is set. + */ + public boolean hasRangeElementType() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The rangeElementType. + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + getRangeElementType() { + if (rangeElementTypeBuilder_ == null) { + return rangeElementType_ == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance() + : rangeElementType_; + } else { + return rangeElementTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRangeElementType( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType value) { + if (rangeElementTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rangeElementType_ = value; + } else { + rangeElementTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRangeElementType( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder + builderForValue) { + if (rangeElementTypeBuilder_ == null) { + rangeElementType_ = builderForValue.build(); + } else { + rangeElementTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRangeElementType( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType value) { + if (rangeElementTypeBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && rangeElementType_ != null + && rangeElementType_ + != com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance()) { + getRangeElementTypeBuilder().mergeFrom(value); + } else { + rangeElementType_ = value; + } + } else { + rangeElementTypeBuilder_.mergeFrom(value); + } + if (rangeElementType_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRangeElementType() { + bitField0_ = (bitField0_ & ~0x00000400); + rangeElementType_ = null; + if (rangeElementTypeBuilder_ != null) { + rangeElementTypeBuilder_.dispose(); + rangeElementTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder + getRangeElementTypeBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return getRangeElementTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder + getRangeElementTypeOrBuilder() { + if (rangeElementTypeBuilder_ != null) { + return rangeElementTypeBuilder_.getMessageOrBuilder(); + } else { + return rangeElementType_ == null + ? com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType + .getDefaultInstance() + : rangeElementType_; + } + } + + /** + * + * + *
+     * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+     * the type is RANGE, this field is required. Possible values for the field
+     * element type of a RANGE include:
+     * * DATE
+     * * DATETIME
+     * * TIMESTAMP
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder> + getRangeElementTypeFieldBuilder() { + if (rangeElementTypeBuilder_ == null) { + rangeElementTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder>( + getRangeElementType(), getParentForChildren(), isClean()); + rangeElementType_ = null; + } + return rangeElementTypeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.TableFieldSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.TableFieldSchema) + private static final com.google.cloud.bigquery.storage.v1.TableFieldSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.TableFieldSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.TableFieldSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableFieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java new file mode 100644 index 000000000000..7562f90396f5 --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java @@ -0,0 +1,442 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface TableFieldSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.TableFieldSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type getType(); + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + int getModeValue(); + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode getMode(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getFieldsList(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to
+   * STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder(int index); + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
+   * Optional. Maximum length of values of this field for "STRING" or "BYTES" types.
+   *
+   * If max_length is not specified, no maximum length constraint is imposed
+   * on this field.
+   *
+   * If type = "STRING", then max_length represents the maximum UTF-8
+   * length of strings in this field.
+   *
+   * If type = "BYTES", then max_length represents the maximum number of
+   * bytes in this field.
+   *
+   * It is invalid to set this field if type is not "STRING" or "BYTES".
+   * 
+ * + * int64 max_length = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The maxLength. + */ + long getMaxLength(); + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) and scale
+   * (maximum number of digits in the fractional part in base 10) constraints
+   * for values of this field for NUMERIC or BIGNUMERIC.
+   *
+   * It is invalid to set precision or scale if type is not "NUMERIC" or
+   * "BIGNUMERIC".
+   *
+   * If precision and scale are not specified, no value range constraint is
+   * imposed on this field insofar as values are permitted by the type.
+   *
+   * Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+   *
+   * * Precision (P) and scale (S) are specified:
+   *   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+   * * Precision (P) is specified but not scale (and thus scale is
+   *   interpreted to be equal to zero):
+   *   [-10^P + 1, 10^P - 1].
+   *
+   * Acceptable values for precision and scale if both are specified:
+   *
+   * * If type = "NUMERIC":
+   *   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+   * * If type = "BIGNUMERIC":
+   *   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+   *
+   * Acceptable values for precision if only precision is specified but not
+   * scale (and thus scale is interpreted to be equal to zero):
+   *
+   * * If type = "NUMERIC": 1 <= precision <= 29.
+   * * If type = "BIGNUMERIC": 1 <= precision <= 38.
+   *
+   * If scale is specified but not precision, then it is invalid.
+   * 
+ * + * int64 precision = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The precision. + */ + long getPrecision(); + + /** + * + * + *
+   * Optional. See documentation for precision.
+   * 
+ * + * int64 scale = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The scale. + */ + long getScale(); + + /** + * + * + *
+   * Optional. A SQL expression to specify the [default value]
+   * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+   * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The defaultValueExpression. + */ + java.lang.String getDefaultValueExpression(); + + /** + * + * + *
+   * Optional. A SQL expression to specify the [default value]
+   * (https://cloud.google.com/bigquery/docs/default-values) for this field.
+   * 
+ * + * string default_value_expression = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for defaultValueExpression. + */ + com.google.protobuf.ByteString getDefaultValueExpressionBytes(); + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) for seconds
+   * of TIMESTAMP type.
+   *
+   * Possible values include:
+   *
+   * * 6 (Default, for TIMESTAMP type with microsecond precision)
+   * * 12 (For TIMESTAMP type with picosecond precision)
+   * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the timestampPrecision field is set. + */ + boolean hasTimestampPrecision(); + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) for seconds
+   * of TIMESTAMP type.
+   *
+   * Possible values include:
+   *
+   * * 6 (Default, for TIMESTAMP type with microsecond precision)
+   * * 12 (For TIMESTAMP type with picosecond precision)
+   * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The timestampPrecision. + */ + com.google.protobuf.Int64Value getTimestampPrecision(); + + /** + * + * + *
+   * Optional. Precision (maximum number of total digits in base 10) for seconds
+   * of TIMESTAMP type.
+   *
+   * Possible values include:
+   *
+   * * 6 (Default, for TIMESTAMP type with microsecond precision)
+   * * 12 (For TIMESTAMP type with picosecond precision)
+   * 
+ * + * + * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.Int64ValueOrBuilder getTimestampPrecisionOrBuilder(); + + /** + * + * + *
+   * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+   * the type is RANGE, this field is required. Possible values for the field
+   * element type of a RANGE include:
+   * * DATE
+   * * DATETIME
+   * * TIMESTAMP
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the rangeElementType field is set. + */ + boolean hasRangeElementType(); + + /** + * + * + *
+   * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+   * the type is RANGE, this field is required. Possible values for the field
+   * element type of a RANGE include:
+   * * DATE
+   * * DATETIME
+   * * TIMESTAMP
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The rangeElementType. + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType getRangeElementType(); + + /** + * + * + *
+   * Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+   * the type is RANGE, this field is required. Possible values for the field
+   * element type of a RANGE include:
+   * * DATE
+   * * DATETIME
+   * * TIMESTAMP
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType range_element_type = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeOrBuilder + getRangeElementTypeOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java new file mode 100644 index 000000000000..31408f73bb06 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java @@ -0,0 +1,217 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
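 +// Example usage (editor's illustrative sketch, not generated output; it uses +// only the factory, formatting, and parsing methods defined in this class): +// +// TableName name = TableName.of("my-project", "my_dataset", "my_table"); +// String formatted = name.toString(); +// // "projects/my-project/datasets/my_dataset/tables/my_table" +// TableName parsed = TableName.parse(formatted); 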
+@Generated("by gapic-generator-java") +public class TableName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/datasets/{dataset}/tables/{table}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + + @Deprecated + protected TableName() { + project = null; + dataset = null; + table = null; + } + + private TableName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static TableName of(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build(); + } + + public static String format(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build().toString(); + } + + public static TableName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE.validatedMatch( + formattedString, "TableName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("dataset"), matchMap.get("table")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (TableName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE.instantiate( + "project", project, "dataset", dataset, "table", table); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + TableName that = ((TableName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= 
Objects.hashCode(table); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + private Builder(TableName tableName) { + this.project = tableName.project; + this.dataset = tableName.dataset; + this.table = tableName.table; + } + + public TableName build() { + return new TableName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java new file mode 100644 index 000000000000..3983095fd0a1 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java @@ -0,0 +1,163 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public final class TableProto { + private TableProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + ",google/cloud/bigquery/storage/v1/table.proto\022 google.cloud.bigquery.storage.v1" + + "\032\037google/api/field_behavior.proto\032\036google/protobuf/wrappers.proto\"Q\n" + + "\013TableSchema\022B\n" + + "\006fields\030\001" + + " \003(\01322.google.cloud.bigquery.storage.v1.TableFieldSchema\"\274\007\n" + + "\020TableFieldSchema\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022J\n" + + "\004type\030\002 \001" + + "(\01627.google.cloud.bigquery.storage.v1.TableFieldSchema.TypeB\003\340A\002\022J\n" + + "\004mode\030\003 \001(\01627" + + ".google.cloud.bigquery.storage.v1.TableFieldSchema.ModeB\003\340A\001\022G\n" + + "\006fields\030\004 \003(\01322.g" + + "oogle.cloud.bigquery.storage.v1.TableFieldSchemaB\003\340A\001\022\030\n" + + "\013description\030\006 \001(\tB\003\340A\001\022\027\n\n" + + "max_length\030\007 \001(\003B\003\340A\001\022\026\n" + + "\tprecision\030\010 \001(\003B\003\340A\001\022\022\n" + + "\005scale\030\t \001(\003B\003\340A\001\022%\n" + + "\030default_value_expression\030\n" + + " \001(\tB\003\340A\001\022=\n" + + "\023timestamp_precision\030\033" + + " \001(\0132\033.google.protobuf.Int64ValueB\003\340A\001\022d\n" + + "\022range_element_type\030\013 \001(\0132C" + + ".google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeB\003\340A\001\032^\n" + + "\020FieldElementType\022J\n" + + "\004type\030\001 \001(\01627.google.clou" + + "d.bigquery.storage.v1.TableFieldSchema.TypeB\003\340A\002\"\340\001\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\n\n" + + "\006STRING\020\001\022\t\n" + + "\005INT64\020\002\022\n\n" + + "\006DOUBLE\020\003\022\n\n" + + "\006STRUCT\020\004\022\t\n" + + "\005BYTES\020\005\022\010\n" + + "\004BOOL\020\006\022\r\n" + + "\tTIMESTAMP\020\007\022\010\n" + + "\004DATE\020\010\022\010\n" + + "\004TIME\020\t\022\014\n" + + "\010DATETIME\020\n" + + "\022\r\n" + + 
"\tGEOGRAPHY\020\013\022\013\n" + + "\007NUMERIC\020\014\022\016\n\n" + + "BIGNUMERIC\020\r" + + "\022\014\n" + + "\010INTERVAL\020\016\022\010\n" + + "\004JSON\020\017\022\t\n" + + "\005RANGE\020\020\"F\n" + + "\004Mode\022\024\n" + + "\020MODE_UNSPECIFIED\020\000\022\014\n" + + "\010NULLABLE\020\001\022\014\n" + + "\010REQUIRED\020\002\022\014\n" + + "\010REPEATED\020\003B\272\001\n" + + "$com.google.cloud.bigquery.storage.v1B\n" + + "TableProto" + + "P\001Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\252\002" + + " Google.Cloud.BigQuery.Storage.V1\312\002 Google\\Cloud\\Big" + + "Query\\Storage\\V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.protobuf.WrappersProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor, + new java.lang.String[] { + "Fields", + }); + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor, + new java.lang.String[] { + "Name", + "Type", + "Mode", + "Fields", + "Description", + "MaxLength", + "Precision", + "Scale", + "DefaultValueExpression", + "TimestampPrecision", + "RangeElementType", + }); + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor = + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor, + new java.lang.String[] { + "Type", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.WrappersProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java new file mode 100644 index 000000000000..c96fb6ef4505 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java @@ -0,0 +1,975 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Schema of a table. This schema is a subset of
+ * google.cloud.bigquery.v2.TableSchema containing information necessary to
 + * generate valid messages to write to BigQuery. 
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableSchema} + */ +public final class TableSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.TableSchema) + TableSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableSchema.newBuilder() to construct. + private TableSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableSchema() { + fields_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableSchema.class, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder.class); + } + + public static final int FIELDS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List fields_; + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public java.util.List getFieldsList() { + return fields_; + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public java.util.List + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(1, fields_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, fields_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.TableSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.TableSchema other = + (com.google.cloud.bigquery.storage.v1.TableSchema) obj; + + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.TableSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Schema of a table. This schema is a subset of
+   * google.cloud.bigquery.v2.TableSchema containing information necessary to
 + * generate valid messages to write to BigQuery. 
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.TableSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.TableSchema) + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.TableSchema.class, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.TableSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.TableProto + .internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema build() { + com.google.cloud.bigquery.storage.v1.TableSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema buildPartial() { + com.google.cloud.bigquery.storage.v1.TableSchema result = + new com.google.cloud.bigquery.storage.v1.TableSchema(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1.TableSchema result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.TableSchema result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.TableSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.TableSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.TableSchema other) { + if (other == com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance()) + return this; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1.TableFieldSchema m = + input.readMessage( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList(fields_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public java.util.List getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder addAllFields( + java.lang.Iterable + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableFieldSchema, + com.google.cloud.bigquery.storage.v1.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.TableSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.TableSchema) + private static final com.google.cloud.bigquery.storage.v1.TableSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.TableSchema(); + } + + public static com.google.cloud.bigquery.storage.v1.TableSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java new file mode 100644 index 000000000000..d6818e3f4a88 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java @@ -0,0 +1,82 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface TableSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.TableSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + java.util.List getFieldsList(); + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchema getFields(int index); + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + int getFieldsCount(); + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1.TableFieldSchema fields = 1; + */ + com.google.cloud.bigquery.storage.v1.TableFieldSchemaOrBuilder getFieldsOrBuilder(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java new file mode 100644 index 000000000000..1d721a051e40 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java @@ -0,0 +1,545 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
 + * Information on whether the current connection is being throttled. 
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ThrottleState} + */ +public final class ThrottleState extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.ThrottleState) + ThrottleStateOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ThrottleState.newBuilder() to construct. + private ThrottleState(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ThrottleState() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ThrottleState(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ThrottleState_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ThrottleState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ThrottleState.class, + com.google.cloud.bigquery.storage.v1.ThrottleState.Builder.class); + } + + public static final int THROTTLE_PERCENT_FIELD_NUMBER = 1; + private int throttlePercent_ = 0; + + /** + * + * + *
+   * How much this connection is being throttled. Zero means no throttling,
+   * 100 means fully throttled.
+   * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + @java.lang.Override + public int getThrottlePercent() { + return throttlePercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (throttlePercent_ != 0) { + output.writeInt32(1, throttlePercent_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (throttlePercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, throttlePercent_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.ThrottleState)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.ThrottleState other = + (com.google.cloud.bigquery.storage.v1.ThrottleState) obj; + + if (getThrottlePercent() != other.getThrottlePercent()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + THROTTLE_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getThrottlePercent(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.ThrottleState prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
 + * Information on whether the current connection is being throttled. 
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.ThrottleState} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.ThrottleState) + com.google.cloud.bigquery.storage.v1.ThrottleStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ThrottleState_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ThrottleState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.ThrottleState.class, + com.google.cloud.bigquery.storage.v1.ThrottleState.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.ThrottleState.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + throttlePercent_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_ThrottleState_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ThrottleState getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ThrottleState build() { + com.google.cloud.bigquery.storage.v1.ThrottleState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ThrottleState buildPartial() { + com.google.cloud.bigquery.storage.v1.ThrottleState result = + new com.google.cloud.bigquery.storage.v1.ThrottleState(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.ThrottleState result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.throttlePercent_ = throttlePercent_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.ThrottleState) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.ThrottleState) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ThrottleState other) { + if (other == com.google.cloud.bigquery.storage.v1.ThrottleState.getDefaultInstance()) + return this; + if (other.getThrottlePercent() != 0) { + setThrottlePercent(other.getThrottlePercent()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + throttlePercent_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int throttlePercent_; + + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + @java.lang.Override + public int getThrottlePercent() { + return throttlePercent_; + } + + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @param value The throttlePercent to set. + * @return This builder for chaining. + */ + public Builder setThrottlePercent(int value) { + + throttlePercent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return This builder for chaining. + */ + public Builder clearThrottlePercent() { + bitField0_ = (bitField0_ & ~0x00000001); + throttlePercent_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.ThrottleState) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.ThrottleState) + private static final com.google.cloud.bigquery.storage.v1.ThrottleState DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.ThrottleState(); + } + + public static com.google.cloud.bigquery.storage.v1.ThrottleState getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ThrottleState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.ThrottleState getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java new file mode 100644 index 000000000000..7de7ac81123a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java @@ -0,0 +1,40 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface ThrottleStateOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.ThrottleState) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * How much this connection is being throttled. Zero means no throttling,
+   * 100 means fully throttled.
+   * 
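+   *
+   * Read-side sketch (illustrative; {@code throttleState} is an assumed local):
+   * <pre>{@code
+   * boolean throttled = throttleState.getThrottlePercent() > 0;
+   * }</pre>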
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + int getThrottlePercent(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java new file mode 100644 index 000000000000..7b86f8052165 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java @@ -0,0 +1,2514 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * Information about a single stream that carries data into the storage system.
+ * 
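+ *
+ * A minimal construction sketch (illustrative; most fields are output only and
+ * are populated by the service):
+ * <pre>{@code
+ * WriteStream stream =
+ *     WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build();
+ * }</pre>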
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.WriteStream} + */ +public final class WriteStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.WriteStream) + WriteStreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use WriteStream.newBuilder() to construct. + private WriteStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private WriteStream() { + name_ = ""; + type_ = 0; + writeMode_ = 0; + location_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new WriteStream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.WriteStream.class, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder.class); + } + + /** + * + * + *
+   * Type enum of the stream.
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.WriteStream.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
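+   *
+   * Dispatch sketch (illustrative; {@code stream} is an assumed local):
+   * <pre>{@code
+   * switch (stream.getType()) {
+   *   case COMMITTED: // visible once the write is acknowledged
+   *     break;
+   *   case PENDING: // invisible until the stream is committed
+   *     break;
+   *   case BUFFERED: // visible up to the last flushed offset
+   *     break;
+   *   default:
+   *     break;
+   * }
+   * }</pre>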
+     * Unknown type.
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * Data will commit automatically and appear as soon as the write is
+     * acknowledged.
+     * 
+ * + * COMMITTED = 1; + */ + COMMITTED(1), + /** + * + * + *
+     * Data is invisible until the stream is committed.
+     * 
+ * + * PENDING = 2; + */ + PENDING(2), + /** + * + * + *
+     * Data is only visible up to the offset to which it was flushed.
+     * 
+ * + * BUFFERED = 3; + */ + BUFFERED(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Unknown type.
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Data will commit automatically and appear as soon as the write is
+     * acknowledged.
+     * 
+ * + * COMMITTED = 1; + */ + public static final int COMMITTED_VALUE = 1; + + /** + * + * + *
+     * Data is invisible until the stream is committed.
+     * 
+ * + * PENDING = 2; + */ + public static final int PENDING_VALUE = 2; + + /** + * + * + *
+     * Data is only visible up to the offset to which it was flushed.
+     * 
+ * + * BUFFERED = 3; + */ + public static final int BUFFERED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return COMMITTED; + case 2: + return PENDING; + case 3: + return BUFFERED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.WriteStream.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.WriteStream.Type) + } + + /** + * + * + *
+   * Mode enum of the stream.
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.WriteStream.WriteMode} + */ + public enum WriteMode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
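+   *
+   * Illustrative check ({@code stream} is an assumed local):
+   * <pre>{@code
+   * boolean inserts = stream.getWriteMode() == WriteStream.WriteMode.INSERT;
+   * }</pre>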
+     * Unknown mode.
+     * 
+ * + * WRITE_MODE_UNSPECIFIED = 0; + */ + WRITE_MODE_UNSPECIFIED(0), + /** + * + * + *
+     * Insert new records into the table.
+     * It is the default value if the caller does not specify one.
+     * 
+ * + * INSERT = 1; + */ + INSERT(1), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Unknown mode.
+     * 
+ * + * WRITE_MODE_UNSPECIFIED = 0; + */ + public static final int WRITE_MODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Insert new records into the table.
+     * It is the default value if the caller does not specify one.
+     * 
+ * + * INSERT = 1; + */ + public static final int INSERT_VALUE = 1; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static WriteMode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static WriteMode forNumber(int value) { + switch (value) { + case 0: + return WRITE_MODE_UNSPECIFIED; + case 1: + return INSERT; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public WriteMode findValueByNumber(int number) { + return WriteMode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.WriteStream.getDescriptor().getEnumTypes().get(1); + } + + private static final WriteMode[] VALUES = values(); + + public static WriteMode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private WriteMode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.WriteStream.WriteMode) + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
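+   *
+   * Example value (hypothetical identifiers):
+   * <pre>{@code
+   * "projects/my-project/datasets/my_dataset/tables/my_table/streams/abc"
+   * }</pre>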
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_ = 0; + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream.Type getType() { + com.google.cloud.bigquery.storage.v1.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1.WriteStream.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.Type.UNRECOGNIZED + : result; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is
+   * the creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is
+   * the creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is
+   * the creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp commitTime_; + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+   * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means it has not been committed.
+   * 
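+   *
+   * Commit check for a `PENDING` stream (illustrative; {@code stream} is an
+   * assumed local):
+   * <pre>{@code
+   * boolean committed = stream.hasCommitTime();
+   * }</pre>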
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+   * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means it has not been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+   * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means it has not been committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + public static final int TABLE_SCHEMA_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1.TableSchema tableSchema_; + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. The caller should generate data that's
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + @java.lang.Override + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
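+   *
+   * Illustrative read of the returned schema ({@code stream} is an assumed
+   * local, e.g. a `CreateWriteStream` response):
+   * <pre>{@code
+   * if (stream.hasTableSchema()) {
+   *   TableSchema schema = stream.getTableSchema();
+   * }
+   * }</pre>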
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. The caller should generate data that's
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchema getTableSchema() { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. The caller should generate data that's
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } + + public static final int WRITE_MODE_FIELD_NUMBER = 7; + private int writeMode_ = 0; + + /** + * + * + *
+   * Immutable. Mode of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for writeMode. + */ + @java.lang.Override + public int getWriteModeValue() { + return writeMode_; + } + + /** + * + * + *
+   * Immutable. Mode of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The writeMode. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode getWriteMode() { + com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode result = + com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode.forNumber(writeMode_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode.UNRECOGNIZED + : result; + } + + public static final int LOCATION_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
+   * Output only. The geographic location where the stream's dataset resides.
+   * See https://cloud.google.com/bigquery/docs/locations for supported
+   * locations.
+   * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. The geographic location where the stream's dataset resides.
+   * See https://cloud.google.com/bigquery/docs/locations for supported
+   * locations.
+   * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1.WriteStream.Type.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(2, type_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getCommitTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getTableSchema()); + } + if (writeMode_ + != com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode.WRITE_MODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(7, writeMode_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 8, location_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1.WriteStream.Type.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCommitTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getTableSchema()); + } + if (writeMode_ + != com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode.WRITE_MODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(7, writeMode_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, location_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.WriteStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.WriteStream other = + (com.google.cloud.bigquery.storage.v1.WriteStream) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) 
return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (hasTableSchema() != other.hasTableSchema()) return false; + if (hasTableSchema()) { + if (!getTableSchema().equals(other.getTableSchema())) return false; + } + if (writeMode_ != other.writeMode_) return false; + if (!getLocation().equals(other.getLocation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + hash = (37 * hash) + WRITE_MODE_FIELD_NUMBER; + hash = (53 * hash) + writeMode_; + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.WriteStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a single stream that carries data into the storage system.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.WriteStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.WriteStream) + com.google.cloud.bigquery.storage.v1.WriteStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.WriteStream.class, + com.google.cloud.bigquery.storage.v1.WriteStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.WriteStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCreateTimeFieldBuilder(); + getCommitTimeFieldBuilder(); + getTableSchemaFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = 0; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + tableSchema_ = null; + if (tableSchemaBuilder_ != null) { + tableSchemaBuilder_.dispose(); + tableSchemaBuilder_ = null; + } + writeMode_ = 0; + location_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StreamProto + .internal_static_google_cloud_bigquery_storage_v1_WriteStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream build() { + com.google.cloud.bigquery.storage.v1.WriteStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream buildPartial() { + com.google.cloud.bigquery.storage.v1.WriteStream result = + new com.google.cloud.bigquery.storage.v1.WriteStream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1.WriteStream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? 
createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.commitTime_ = commitTimeBuilder_ == null ? commitTime_ : commitTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.tableSchema_ = + tableSchemaBuilder_ == null ? tableSchema_ : tableSchemaBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.writeMode_ = writeMode_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.location_ = location_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.WriteStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.WriteStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.WriteStream other) { + if (other == com.google.cloud.bigquery.storage.v1.WriteStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + if (other.writeMode_ != 0) { + setWriteModeValue(other.getWriteModeValue()); + } + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000040; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + input.readMessage(getCreateTimeFieldBuilder().getBuilder(), 
extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(getCommitTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage(getTableSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 56: + { + writeMode_ = input.readEnum(); + bitField0_ |= 0x00000020; + break; + } // case 56 + case 66: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int type_ = 0; + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream.Type getType() { + com.google.cloud.bigquery.storage.v1.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1.WriteStream.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.cloud.bigquery.storage.v1.WriteStream.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is
+     * the creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it has not been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it has not been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it has not been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + } else { + commitTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it has not been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it has not been committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && commitTime_ != null + && commitTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimeBuilder().mergeFrom(value); + } else { + commitTime_ = value; + } + } else { + commitTimeBuilder_.mergeFrom(value); + } + if (commitTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCommitTime() { + bitField0_ = (bitField0_ & ~0x00000008); + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+     * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private com.google.cloud.bigquery.storage.v1.TableSchema tableSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + tableSchemaBuilder_; + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
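+     * For example (an illustrative sketch; assumes a `BigQueryWriteClient`
+     * named `client` and a destination `TableName` named `table`):
+     *
+     *   WriteStream stream = client.createWriteStream(
+     *       table, WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build());
+     *   TableSchema schema = stream.getTableSchema(); // encode rows against this schema
+     *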
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + public com.google.cloud.bigquery.storage.v1.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + } else { + tableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema( + com.google.cloud.bigquery.storage.v1.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeTableSchema(com.google.cloud.bigquery.storage.v1.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && tableSchema_ != null + && tableSchema_ + != com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance()) { + getTableSchemaBuilder().mergeFrom(value); + } else { + tableSchema_ = value; + } + } else { + tableSchemaBuilder_.mergeFrom(value); + } + if (tableSchema_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearTableSchema() { + bitField0_ = (bitField0_ & ~0x00000010); + tableSchema_ = null; + if (tableSchemaBuilder_ != null) { + tableSchemaBuilder_.dispose(); + tableSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableSchema.Builder getTableSchemaBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1.TableSchema.getDefaultInstance() + : tableSchema_; + } + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * the `CreateWriteStream` response. The caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.TableSchema, + com.google.cloud.bigquery.storage.v1.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder>( + getTableSchema(), getParentForChildren(), isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + private int writeMode_ = 0; + + /** + * + * + *
+     * Immutable. Mode of the stream.
+     * 
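+     * If the server returns a mode this client version does not recognize,
+     * `getWriteMode()` reports `UNRECOGNIZED` while the raw wire value remains
+     * available here, e.g. (a sketch; `builder` is a `WriteStream.Builder`):
+     *
+     *   if (builder.getWriteMode() == WriteStream.WriteMode.UNRECOGNIZED) {
+     *     int raw = builder.getWriteModeValue(); // preserve the unknown value
+     *   }
+     *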
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for writeMode. + */ + @java.lang.Override + public int getWriteModeValue() { + return writeMode_; + } + + /** + * + * + *
+     * Immutable. Mode of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for writeMode to set. + * @return This builder for chaining. + */ + public Builder setWriteModeValue(int value) { + writeMode_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Mode of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The writeMode. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode getWriteMode() { + com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode result = + com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode.forNumber(writeMode_); + return result == null + ? com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Immutable. Mode of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The writeMode to set. + * @return This builder for chaining. + */ + public Builder setWriteMode(com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + writeMode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Mode of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearWriteMode() { + bitField0_ = (bitField0_ & ~0x00000020); + writeMode_ = 0; + onChanged(); + return this; + } + + private java.lang.Object location_ = ""; + + /** + * + * + *
+     * Output only. The geographic location where the stream's dataset resides.
+     * See https://cloud.google.com/bigquery/docs/locations for supported
+     * locations.
+     * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. The geographic location where the stream's dataset resides.
+     * See https://cloud.google.com/bigquery/docs/locations for supported
+     * locations.
+     * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. The geographic location where the stream's dataset resides.
+     * See https://cloud.google.com/bigquery/docs/locations for supported
+     * locations.
+     * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The geographic location where the stream's dataset resides.
+     * See https://cloud.google.com/bigquery/docs/locations for supported
+     * locations.
+     * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The geographic location where the stream's dataset resides.
+     * See https://cloud.google.com/bigquery/docs/locations for supported
+     * locations.
+     * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.WriteStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.WriteStream) + private static final com.google.cloud.bigquery.storage.v1.WriteStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.WriteStream(); + } + + public static com.google.cloud.bigquery.storage.v1.WriteStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.WriteStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java new file mode 100644 index 000000000000..38c642a06484 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java @@ -0,0 +1,257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class WriteStreamName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE_STREAM = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + private final String stream; + + @Deprecated + protected WriteStreamName() { + project = null; + dataset = null; + table = null; + stream = null; + } + + private WriteStreamName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + stream = Preconditions.checkNotNull(builder.getStream()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public String getStream() { + return stream; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static WriteStreamName of(String project, String dataset, String table, String stream) { + return newBuilder() + .setProject(project) + .setDataset(dataset) + .setTable(table) + .setStream(stream) + .build(); + } + + public static String format(String project, String dataset, String table, String stream) { + return newBuilder() + .setProject(project) + .setDataset(dataset) + .setTable(table) + .setStream(stream) + .build() + .toString(); + } + + public static WriteStreamName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE_STREAM.validatedMatch( + formattedString, "WriteStreamName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("dataset"), + matchMap.get("table"), + matchMap.get("stream")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (WriteStreamName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE_STREAM.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if 
(table != null) { + fieldMapBuilder.put("table", table); + } + if (stream != null) { + fieldMapBuilder.put("stream", stream); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE_STREAM.instantiate( + "project", project, "dataset", dataset, "table", table, "stream", stream); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + WriteStreamName that = ((WriteStreamName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table) + && Objects.equals(this.stream, that.stream); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + h *= 1000003; + h ^= Objects.hashCode(stream); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + private String stream; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public String getStream() { + return stream; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + public Builder setStream(String stream) { + this.stream = stream; + return this; + } + + private Builder(WriteStreamName writeStreamName) { + this.project = writeStreamName.project; + this.dataset = writeStreamName.dataset; + this.table = writeStreamName.table; + this.stream = writeStreamName.stream; + } + + public WriteStreamName build() { + return new WriteStreamName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java new file mode 100644 index 000000000000..fa73bd663d13 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java @@ -0,0 +1,288 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +public interface WriteStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.WriteStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
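+   * Such names can be built and parsed with the generated `WriteStreamName`
+   * helper, e.g. (illustrative values):
+   *
+   *   WriteStreamName name =
+   *       WriteStreamName.of("my-project", "my_dataset", "my_table", "my-stream");
+   *   String formatted = name.toString();
+   *   WriteStreamName parsed = WriteStreamName.parse(formatted);
+   *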
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1.WriteStream.Type getType(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is
+   * the creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is
+   * the creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is
+   * the creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+   * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+   * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time equal
+   * to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. The caller should generate data that's
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + boolean hasTableSchema(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. The caller should generate data that's
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + com.google.cloud.bigquery.storage.v1.TableSchema getTableSchema(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * the `CreateWriteStream` response. The caller should generate data that's
+   * compatible with this schema to send in the initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getTableSchemaOrBuilder(); + + /** + * + * + *
+   * Immutable. Mode of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for writeMode. + */ + int getWriteModeValue(); + + /** + * + * + *
+   * Immutable. Mode of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1.WriteStream.WriteMode write_mode = 7 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The writeMode. + */ + com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode getWriteMode(); + + /** + * + * + *
+   * Output only. The geographic location where the stream's dataset resides.
+   * See https://cloud.google.com/bigquery/docs/locations for supported
+   * locations.
+   * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
+   * Output only. The geographic location where the stream's dataset resides.
+   * See https://cloud.google.com/bigquery/docs/locations for supported
+   * locations.
+   * 
+ * + * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java new file mode 100644 index 000000000000..683bf85004b8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java @@ -0,0 +1,188 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * WriteStreamView is a view enum that controls what details about a write
+ * stream should be returned.
+ * 
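+ * Unknown wire values map to null via `forNumber`, e.g. (a sketch):
+ *
+ *   WriteStreamView view = WriteStreamView.forNumber(2);    // FULL
+ *   WriteStreamView other = WriteStreamView.forNumber(99);  // null for unknown values
+ *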
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.WriteStreamView} + */ +public enum WriteStreamView implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+   * The default / unset value.
+   * 
+ * + * WRITE_STREAM_VIEW_UNSPECIFIED = 0; + */ + WRITE_STREAM_VIEW_UNSPECIFIED(0), + /** + * + * + *
+   * The BASIC projection returns basic metadata about a write stream.  The
+   * basic view does not include schema information.  This is the default view
+   * returned by GetWriteStream.
+   * 
+ * + * BASIC = 1; + */ + BASIC(1), + /** + * + * + *
+   * The FULL projection returns all available write stream metadata, including
+   * the schema.  CreateWriteStream returns the full projection of write stream
+   * metadata.
+   * 
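+   * For example, to retrieve a stream's schema with `GetWriteStream` (a
+   * sketch; assumes a `BigQueryWriteClient` named `client` and a formatted
+   * stream resource `name`):
+   *
+   *   WriteStream ws = client.getWriteStream(GetWriteStreamRequest.newBuilder()
+   *       .setName(name).setView(WriteStreamView.FULL).build());
+   *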
+ * + * FULL = 2; + */ + FULL(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+   * The default / unset value.
+   * 
+ * + * WRITE_STREAM_VIEW_UNSPECIFIED = 0; + */ + public static final int WRITE_STREAM_VIEW_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+   * The BASIC projection returns basic metadata about a write stream.  The
+   * basic view does not include schema information.  This is the default view
+   * returned by GetWriteStream.
+   * 
+ * + * BASIC = 1; + */ + public static final int BASIC_VALUE = 1; + + /** + * + * + *
+   * The FULL projection returns all available write stream metadata, including
+   * the schema.  CreateWriteStream returns the full projection of write stream
+   * metadata.
+   * 
+ * + * FULL = 2; + */ + public static final int FULL_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static WriteStreamView valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static WriteStreamView forNumber(int value) { + switch (value) { + case 0: + return WRITE_STREAM_VIEW_UNSPECIFIED; + case 1: + return BASIC; + case 2: + return FULL; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public WriteStreamView findValueByNumber(int number) { + return WriteStreamView.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StreamProto.getDescriptor().getEnumTypes().get(1); + } + + private static final WriteStreamView[] VALUES = values(); + + public static WriteStreamView valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private WriteStreamView(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.WriteStreamView) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/annotations.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/annotations.proto new file mode 100644 index 000000000000..5ea5016dfd49 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/annotations.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/protobuf/descriptor.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +extend google.protobuf.FieldOptions { + // Setting the column_name extension allows users to reference + // 
bigquery column independently of the field name in the protocol buffer + // message. + // + // The intended use of this annotation is to reference a destination column + // named using characters unavailable for protobuf field names (e.g. unicode + // characters). + // + // More details about BigQuery naming limitations can be found here: + // https://cloud.google.com/bigquery/docs/schemas#column_names + // + // This extension is currently experimental. + optional string column_name = 454943157; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto new file mode 100644 index 000000000000..0132aab1935b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto @@ -0,0 +1,89 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "ArrowProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// Arrow schema as specified in +// https://arrow.apache.org/docs/python/api/datatypes.html +// and serialized to bytes using IPC: +// https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc +// +// See code samples on how this message can be deserialized. +message ArrowSchema { + // IPC serialized Arrow schema. + bytes serialized_schema = 1; +} + +// Arrow RecordBatch. +message ArrowRecordBatch { + // IPC-serialized Arrow RecordBatch. + bytes serialized_record_batch = 1; + + // [Deprecated] The count of rows in `serialized_record_batch`. + // Please use the format-independent ReadRowsResponse.row_count instead. + int64 row_count = 2 [deprecated = true]; +} + +// Contains options specific to Arrow Serialization. +message ArrowSerializationOptions { + // Compression codec's supported by Arrow. + enum CompressionCodec { + // If unspecified no compression will be used. + COMPRESSION_UNSPECIFIED = 0; + + // LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md) + LZ4_FRAME = 1; + + // Zstandard compression. + ZSTD = 2; + } + + // The precision of the timestamp value in the Avro message. This precision + // will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type. + enum PicosTimestampPrecision { + // Unspecified timestamp precision. The default precision is microseconds. + PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0; + + // Timestamp values returned by Read API will be truncated to microsecond + // level precision. 
The value will be encoded as Arrow TIMESTAMP type in a + // 64 bit integer. + TIMESTAMP_PRECISION_MICROS = 1; + + // Timestamp values returned by Read API will be truncated to nanosecond + // level precision. The value will be encoded as Arrow TIMESTAMP type in a + // 64 bit integer. + TIMESTAMP_PRECISION_NANOS = 2; + + // Read API will return full precision picosecond value. The value will be + // encoded as a string which conforms to ISO 8601 format. + TIMESTAMP_PRECISION_PICOS = 3; + } + + // The compression codec to use for Arrow buffers in serialized record + // batches. + CompressionCodec buffer_compression = 2; + + // Optional. Set timestamp precision option. If not set, the default precision + // is microseconds. + PicosTimestampPrecision picos_timestamp_precision = 3; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto new file mode 100644 index 000000000000..6082fa58d86e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto @@ -0,0 +1,81 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "AvroProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// Avro schema. +message AvroSchema { + // Json serialized schema, as described at + // https://avro.apache.org/docs/1.8.1/spec.html. + string schema = 1; +} + +// Avro rows. +message AvroRows { + // Binary serialized rows in a block. + bytes serialized_binary_rows = 1; + + // [Deprecated] The count of rows in the returning block. + // Please use the format-independent ReadRowsResponse.row_count instead. + int64 row_count = 2 [deprecated = true]; +} + +// Contains options specific to Avro Serialization. +message AvroSerializationOptions { + // The precision of the timestamp value in the Avro message. This precision + // will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type. + enum PicosTimestampPrecision { + // Unspecified timestamp precision. The default precision is microseconds. + PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0; + + // Timestamp values returned by Read API will be truncated to microsecond + // level precision. The value will be encoded as Avro TIMESTAMP type in a + // 64 bit integer. + TIMESTAMP_PRECISION_MICROS = 1; + + // Timestamp values returned by Read API will be truncated to nanosecond + // level precision. The value will be encoded as Avro TIMESTAMP type in a + // 64 bit integer. 
+ TIMESTAMP_PRECISION_NANOS = 2; + + // Read API will return full precision picosecond value. The value will be + // encoded as a string which conforms to ISO 8601 format. + TIMESTAMP_PRECISION_PICOS = 3; + } + + // Enable displayName attribute in Avro schema. + // + // The Avro specification requires field names to be alphanumeric. By + // default, in cases when column names do not conform to these requirements + // (e.g. non-ascii unicode codepoints) and Avro is requested as an output + // format, the CreateReadSession call will fail. + // + // Setting this field to true, populates avro field names with a placeholder + // value and populates a "displayName" attribute for every avro field with the + // original column name. + bool enable_display_name_attribute = 1; + + // Optional. Set timestamp precision option. If not set, the default precision + // is microseconds. + PicosTimestampPrecision picos_timestamp_precision = 2; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/protobuf.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/protobuf.proto new file mode 100644 index 000000000000..2713e057839f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/protobuf.proto @@ -0,0 +1,48 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/protobuf/descriptor.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "ProtoBufProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// ProtoSchema describes the schema of the serialized protocol buffer data rows. +message ProtoSchema { + // Descriptor for input message. The provided descriptor must be self + // contained, such that data rows sent can be fully decoded using only the + // single descriptor. For data rows that are compositions of multiple + // independent messages, this means the descriptor may need to be transformed + // to only use nested types: + // https://developers.google.com/protocol-buffers/docs/proto#nested + // + // For additional information for how proto types and values map onto BigQuery + // see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions + google.protobuf.DescriptorProto proto_descriptor = 1; +} + +message ProtoRows { + // A sequence of rows serialized as a Protocol Buffer. + // + // See https://developers.google.com/protocol-buffers/docs/overview for more + // information on deserializing this field. 
+ repeated bytes serialized_rows = 1; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto new file mode 100644 index 000000000000..dc0ae7f9f4d8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto @@ -0,0 +1,786 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1/arrow.proto"; +import "google/cloud/bigquery/storage/v1/avro.proto"; +import "google/cloud/bigquery/storage/v1/protobuf.proto"; +import "google/cloud/bigquery/storage/v1/stream.proto"; +import "google/cloud/bigquery/storage/v1/table.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "StorageProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; +option (google.api.resource_definition) = { + type: "bigquery.googleapis.com/Table" + pattern: "projects/{project}/datasets/{dataset}/tables/{table}" +}; + +// BigQuery Read API. +// +// The Read API can be used to read data from BigQuery. +service BigQueryRead { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new read session. A read session divides the contents of a + // BigQuery table into one or more streams, which can then be used to read + // data from the table. The read session also specifies properties of the + // data to be read, such as a list of columns or a push-down filter describing + // the rows to be returned. + // + // A particular row can be read by at most one stream. When the caller has + // reached the end of each stream in the session, then all the data in the + // table has been read. + // + // Data is assigned to each stream such that roughly the same number of + // rows can be read from each stream. Because the server-side unit for + // assigning data is collections of rows, the API does not guarantee that + // each stream will return the same number or rows. Additionally, the + // limits are enforced based on the number of pre-filtered rows, so some + // filters can lead to lopsided assignments. 
+ // + // Read sessions automatically expire 6 hours after they are created and do + // not require manual clean-up by the caller. + rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) { + option (google.api.http) = { + post: "/v1/{read_session.table=projects/*/datasets/*/tables/*}" + body: "*" + }; + option (google.api.method_signature) = + "parent,read_session,max_stream_count"; + } + + // Reads rows from the stream in the format prescribed by the ReadSession. + // Each response contains one or more table rows, up to a maximum of 128 MB + // per response; read requests which attempt to read individual rows larger + // than 128 MB will fail. + // + // Each request also returns a set of stream statistics reflecting the current + // state of the stream. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { + get: "/v1/{read_stream=projects/*/locations/*/sessions/*/streams/*}" + }; + option (google.api.method_signature) = "read_stream,offset"; + } + + // Splits a given `ReadStream` into two `ReadStream` objects. These + // `ReadStream` objects are referred to as the primary and the residual + // streams of the split. The original `ReadStream` can still be read from in + // the same manner as before. Both of the returned `ReadStream` objects can + // also be read from, and the rows returned by both child streams will be + // the same as the rows read from the original stream. + // + // Moreover, the two child streams will be allocated back-to-back in the + // original `ReadStream`. Concretely, it is guaranteed that for streams + // original, primary, and residual, that original[0-j] = primary[0-j] and + // original[j-n] = residual[0-m] once the streams have been read to + // completion. + rpc SplitReadStream(SplitReadStreamRequest) + returns (SplitReadStreamResponse) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/sessions/*/streams/*}" + }; + } +} + +// BigQuery Write API. +// +// The Write API can be used to write data to BigQuery. +// +// For supplementary information about the Write API, see: +// https://cloud.google.com/bigquery/docs/write-api +service BigQueryWrite { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/bigquery.insertdata," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a write stream to the given table. + // Additionally, every table has a special stream named '_default' + // to which data can be written. This stream doesn't need to be created using + // CreateWriteStream. It is a stream that can be used simultaneously by any + // number of clients. Data written to this stream is considered committed as + // soon as an acknowledgement is received. + rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/datasets/*/tables/*}" + body: "write_stream" + }; + option (google.api.method_signature) = "parent,write_stream"; + } + + // Appends data to the given stream. + // + // If `offset` is specified, the `offset` is checked against the end of + // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an + // attempt is made to append to an offset beyond the current end of the stream + // or `ALREADY_EXISTS` if user provides an `offset` that has already been + // written to. 
User can retry with adjusted offset within the same RPC + // connection. If `offset` is not specified, append happens at the end of the + // stream. + // + // The response contains an optional offset at which the append + // happened. No offset information will be returned for appends to a + // default stream. + // + // Responses are received in the same order in which requests are sent. + // There will be one response for each successful inserted request. Responses + // may optionally embed error information if the originating AppendRequest was + // not successfully processed. + // + // The specifics of when successfully appended data is made visible to the + // table are governed by the type of stream: + // + // * For COMMITTED streams (which includes the default stream), data is + // visible immediately upon successful append. + // + // * For BUFFERED streams, data is made visible via a subsequent `FlushRows` + // rpc which advances a cursor to a newer offset in the stream. + // + // * For PENDING streams, data is not made visible until the stream itself is + // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly + // committed via the `BatchCommitWriteStreams` rpc. + rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) { + option (google.api.http) = { + post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + option (google.api.method_signature) = "write_stream"; + } + + // Gets information about a write stream. + rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) { + option (google.api.http) = { + post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Finalize a write stream so that no new data can be appended to the + // stream. Finalize is not supported on the '_default' stream. + rpc FinalizeWriteStream(FinalizeWriteStreamRequest) + returns (FinalizeWriteStreamResponse) { + option (google.api.http) = { + post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Atomically commits a group of `PENDING` streams that belong to the same + // `parent` table. + // + // Streams must be finalized before commit and cannot be committed multiple + // times. Once a stream is committed, data in the stream becomes available + // for read operations. + rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) + returns (BatchCommitWriteStreamsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/datasets/*/tables/*}" + }; + option (google.api.method_signature) = "parent"; + } + + // Flushes rows to a BUFFERED stream. + // + // If users are appending rows to BUFFERED stream, flush operation is + // required in order for the rows to become available for reading. A + // Flush operation flushes up to any previously flushed offset in a BUFFERED + // stream, to the offset specified in the request. + // + // Flush is not supported on the _default stream, since it is not BUFFERED. + rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) { + option (google.api.http) = { + post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + option (google.api.method_signature) = "write_stream"; + } +} + +// Request message for `CreateReadSession`. +message CreateReadSessionRequest { + // Required. The request project that owns the session, in the form of + // `projects/{project_id}`. 
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Required. Session to be created.
+  ReadSession read_session = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Max initial number of streams. If unset or zero, the server will
+  // provide a number of streams so as to produce reasonable throughput. Must be
+  // non-negative. The number of streams may be lower than the requested number,
+  // depending on the amount of parallelism that is reasonable for the table.
+  // There is a default system max limit of 1,000.
+  //
+  // This must be greater than or equal to preferred_min_stream_count.
+  // Typically, clients should either leave this unset to let the system
+  // determine an upper bound OR set this to a size for the maximum "units of
+  // work" it can gracefully handle.
+  int32 max_stream_count = 3;
+
+  // The minimum preferred stream count. This parameter can be used to inform
+  // the service that there is a desired lower bound on the number of streams.
+  // This is typically a target parallelism of the client (e.g. a Spark
+  // cluster with N-workers would set this to a low multiple of N to ensure
+  // good cluster utilization).
+  //
+  // The system will make a best effort to provide at least this number of
+  // streams, but in some cases might provide less.
+  int32 preferred_min_stream_count = 4;
+}
+
+// Request message for `ReadRows`.
+message ReadRowsRequest {
+  // Required. Stream to read rows from.
+  string read_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/ReadStream"
+    }
+  ];
+
+  // The offset requested must be less than the last row read from Read.
+  // Requesting a larger offset is undefined. If not specified, start reading
+  // from offset zero.
+  int64 offset = 2;
+}
+
+// Information on whether the current connection is being throttled.
+message ThrottleState {
+  // How much this connection is being throttled. Zero means no throttling,
+  // 100 means fully throttled.
+  int32 throttle_percent = 1;
+}
+
+// Estimated stream statistics for a given read stream.
+message StreamStats {
+  message Progress {
+    // The fraction of rows assigned to the stream that have been processed by
+    // the server so far, not including the rows in the current response
+    // message.
+    //
+    // This value, along with `at_response_end`, can be used to interpolate
+    // the progress made as the rows in the message are being processed using
+    // the following formula: `at_response_start + (at_response_end -
+    // at_response_start) * rows_processed_from_response / rows_in_response`.
+    //
+    // Note that if a filter is provided, the `at_response_end` value of the
+    // previous response may not necessarily be equal to the
+    // `at_response_start` value of the current response.
+    double at_response_start = 1;
+
+    // Similar to `at_response_start`, except that this value includes the
+    // rows in the current response.
+    double at_response_end = 2;
+  }
+
+  // Represents the progress of the current stream.
+  Progress progress = 2;
+}
+
+// Response from calling `ReadRows` may include row data, progress and
+// throttling information.
+message ReadRowsResponse {
+  // Row data is returned in format specified during session creation.
+  oneof rows {
+    // Serialized row data in AVRO format.
+    AvroRows avro_rows = 3;
+
+    // Serialized row data in Arrow RecordBatch format.
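+    //
+    // As a worked example of the `StreamStats.Progress` interpolation defined
+    // above (numbers are illustrative): with `at_response_start = 0.25`,
+    // `at_response_end = 0.35`, and 100 rows in the response, a reader that
+    // has processed 40 of those rows can estimate its progress as
+    // 0.25 + (0.35 - 0.25) * 40 / 100 = 0.29.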
+    ArrowRecordBatch arrow_record_batch = 4;
+  }
+
+  // Number of serialized rows in the rows block.
+  int64 row_count = 6;
+
+  // Statistics for the stream.
+  StreamStats stats = 2;
+
+  // Throttling state. If unset, the latest response still describes
+  // the current throttling status.
+  ThrottleState throttle_state = 5;
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields. This schema is equivalent to the one returned by
+  // CreateSession. This field is only populated in the first ReadRowsResponse
+  // RPC.
+  oneof schema {
+    // Output only. Avro schema.
+    AvroSchema avro_schema = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Arrow schema.
+    ArrowSchema arrow_schema = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Optional. If the row data in this ReadRowsResponse is compressed, then
+  // uncompressed byte size is the original size of the uncompressed row data.
+  // If it is set to a value greater than 0, then decompress into a buffer of
+  // size uncompressed_byte_size using the compression codec that was requested
+  // during session creation time and which is specified in
+  // TableReadOptions.response_compression_codec in ReadSession.
+  // This value is not set if no response_compression_codec was requested,
+  // and it is -1 if the requested compression would not have reduced the size
+  // of this ReadRowsResponse's row data. This attempts to match Apache Arrow's
+  // behavior described here https://github.com/apache/arrow/issues/15102 where
+  // the uncompressed length may be set to -1 to indicate that the data that
+  // follows is not compressed, which can be useful for cases where compression
+  // does not yield appreciable savings. When uncompressed_byte_size is not
+  // greater than 0, the client should skip decompression.
+  optional int64 uncompressed_byte_size = 9
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for `SplitReadStream`.
+message SplitReadStreamRequest {
+  // Required. Name of the stream to split.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/ReadStream"
+    }
+  ];
+
+  // A value in the range (0.0, 1.0) that specifies the fractional point at
+  // which the original stream should be split. The actual split point is
+  // evaluated on pre-filtered rows, so if a filter is provided, then there is
+  // no guarantee that the division of the rows between the new child streams
+  // will be proportional to this fractional value. Additionally, because the
+  // server-side unit for assigning data is collections of rows, this fraction
+  // will always map to a data storage boundary on the server side.
+  double fraction = 2;
+}
+
+// Response message for `SplitReadStream`.
+message SplitReadStreamResponse {
+  // Primary stream, which contains the beginning portion of
+  // |original_stream|. An empty value indicates that the original stream can no
+  // longer be split.
+  ReadStream primary_stream = 1;
+
+  // Remainder stream, which contains the tail of |original_stream|. An empty
+  // value indicates that the original stream can no longer be split.
+  ReadStream remainder_stream = 2;
+}
+
+// Request message for `CreateWriteStream`.
+message CreateWriteStreamRequest {
+  // Required. Reference to the table to which the stream belongs, in the format
+  // of `projects/{project}/datasets/{dataset}/tables/{table}`.
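+  //
+  // A minimal sketch using the generated Java message classes (the table path
+  // is illustrative), e.g. to request a PENDING stream:
+  //
+  //   CreateWriteStreamRequest request =
+  //       CreateWriteStreamRequest.newBuilder()
+  //           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+  //           .setWriteStream(
+  //               WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build())
+  //           .build();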
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Required. Stream to be created. + WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for `AppendRows`. +// +// Because AppendRows is a bidirectional streaming RPC, certain parts of the +// AppendRowsRequest need only be specified for the first request before +// switching table destinations. You can also switch table destinations within +// the same connection for the default stream. +// +// The size of a single AppendRowsRequest must be less than 10 MB in size. +// Requests larger than this return an error, typically `INVALID_ARGUMENT`. +message AppendRowsRequest { + // Arrow schema and data. + message ArrowData { + // Optional. Arrow Schema used to serialize the data. + ArrowSchema writer_schema = 1; + + // Required. Serialized row data in Arrow format. + ArrowRecordBatch rows = 2; + } + + // ProtoData contains the data rows and schema when constructing append + // requests. + message ProtoData { + // Optional. The protocol buffer schema used to serialize the data. Provide + // this value whenever: + // + // * You send the first request of an RPC connection. + // + // * You change the input schema. + // + // * You specify a new destination table. + ProtoSchema writer_schema = 1; + + // Required. Serialized row data in protobuf message format. + // Currently, the backend expects the serialized rows to adhere to + // proto2 semantics when appending rows, particularly with respect to + // how default values are encoded. + ProtoRows rows = 2; + } + + // An enum to indicate how to interpret missing values of fields that are + // present in user schema but missing in rows. A missing value can represent a + // NULL or a column default value defined in BigQuery table schema. + enum MissingValueInterpretation { + // Invalid missing value interpretation. Requests with this value will be + // rejected. + MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0; + + // Missing value is interpreted as NULL. + NULL_VALUE = 1; + + // Missing value is interpreted as column default value if declared in the + // table schema, NULL otherwise. + DEFAULT_VALUE = 2; + } + + // Required. The write_stream identifies the append operation. It must be + // provided in the following scenarios: + // + // * In the first request to an AppendRows connection. + // + // * In all subsequent requests to an AppendRows connection, if you use the + // same connection to write to multiple tables or change the input schema for + // default streams. + // + // For explicitly created write streams, the format is: + // + // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` + // + // For the special default stream, the format is: + // + // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. + // + // An example of a possible sequence of requests with write_stream fields + // within a single connection: + // + // * r1: {write_stream: stream_name_1} + // + // * r2: {write_stream: /*omit*/} + // + // * r3: {write_stream: /*omit*/} + // + // * r4: {write_stream: stream_name_2} + // + // * r5: {write_stream: stream_name_2} + // + // The destination changed in request_4, so the write_stream field must be + // populated in all subsequent requests in this stream. 
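+  //
+  // A sketch of a first request on a new connection, assuming a previously
+  // built `ProtoSchema` in `writerSchema` and serialized rows in `protoRows`
+  // (both names are hypothetical):
+  //
+  //   AppendRowsRequest first =
+  //       AppendRowsRequest.newBuilder()
+  //           .setWriteStream(streamName)
+  //           .setProtoRows(
+  //               AppendRowsRequest.ProtoData.newBuilder()
+  //                   .setWriterSchema(writerSchema)
+  //                   .setRows(protoRows)
+  //                   .build())
+  //           .build();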
+  string write_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+
+  // If present, the write is only performed if the next append offset is the
+  // same as the provided value. If not present, the write is performed at the
+  // current end of stream. Specifying a value for this field is not allowed
+  // when calling AppendRows for the '_default' stream.
+  google.protobuf.Int64Value offset = 2;
+
+  // Input rows. The `writer_schema` field must be specified at the initial
+  // request and currently, it will be ignored if specified in following
+  // requests. Following requests must have data in the same format as the
+  // initial request.
+  oneof rows {
+    // Rows in proto format.
+    ProtoData proto_rows = 4;
+
+    // Rows in arrow format.
+    ArrowData arrow_rows = 5;
+  }
+
+  // Id set by the client to annotate its identity. Only the setting on the
+  // initial request is respected.
+  string trace_id = 6;
+
+  // A map to indicate how to interpret missing values for some fields. Missing
+  // values are fields present in user schema but missing in rows. The key is
+  // the field name. The value is the interpretation of missing values for the
+  // field.
+  //
+  // For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
+  // missing values in field foo are interpreted as NULL, all missing values in
+  // field bar are interpreted as the default value of field bar in table
+  // schema.
+  //
+  // If a field is not in this map and has missing values, the missing values
+  // in this field are interpreted as NULL.
+  //
+  // This field only applies to the current request, it won't affect other
+  // requests on the connection.
+  //
+  // Currently, the field name can only be a top-level column name; it can't be
+  // a struct field path like 'foo.bar'.
+  map<string, AppendRowsRequest.MissingValueInterpretation>
+      missing_value_interpretations = 7;
+
+  // Optional. Default missing value interpretation for all columns in the
+  // table. When a value is specified on an `AppendRowsRequest`, it is applied
+  // to all requests from that point forward, until a subsequent
+  // `AppendRowsRequest` sets it to a different value.
+  // `missing_value_interpretation` can override
+  // `default_missing_value_interpretation`. For example, if you want to write
+  // `NULL` instead of using default values for some columns, you can set
+  // `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same
+  // time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
+  MissingValueInterpretation default_missing_value_interpretation = 8
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Response message for `AppendRows`.
+message AppendRowsResponse {
+  // AppendResult is returned for successful append requests.
+  message AppendResult {
+    // The row offset at which the last append occurred. The offset will not be
+    // set if appending using default streams.
+    google.protobuf.Int64Value offset = 1;
+  }
+
+  oneof response {
+    // Result if the append is successful.
+    AppendResult append_result = 1;
+
+    // Error returned when problems were encountered. If present,
+    // it indicates rows were not accepted into the system.
+    // Users can retry or continue with other append requests within the
+    // same connection.
+    //
+    // Additional information about error signalling:
+    //
+    // ALREADY_EXISTS: Happens when an append specified an offset, and the
+    // backend already has received data at this offset. Typically encountered
+    // in retry scenarios, and can be ignored.
+ // + // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond + // the current end of the stream. + // + // INVALID_ARGUMENT: Indicates a malformed request or data. + // + // ABORTED: Request processing is aborted because of prior failures. The + // request can be retried if previous failure is addressed. + // + // INTERNAL: Indicates server side error(s) that can be retried. + google.rpc.Status error = 2; + } + + // If backend detects a schema update, pass it to user so that user can + // use it to input new type of message. It will be empty when no schema + // updates have occurred. + TableSchema updated_schema = 3; + + // If a request failed due to corrupted rows, no rows in the batch will be + // appended. The API will return row level error info, so that the caller can + // remove the bad rows and retry the request. + repeated RowError row_errors = 4; + + // The target of the append operation. Matches the write_stream in the + // corresponding request. + string write_stream = 5; +} + +// Request message for `GetWriteStreamRequest`. +message GetWriteStreamRequest { + // Required. Name of the stream to get, in the form of + // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigquerystorage.googleapis.com/WriteStream" + } + ]; + + // Indicates whether to get full or partial view of the WriteStream. If + // not set, view returned will be basic. + WriteStreamView view = 3; +} + +// Request message for `BatchCommitWriteStreams`. +message BatchCommitWriteStreamsRequest { + // Required. Parent table that all the streams should belong to, in the form + // of `projects/{project}/datasets/{dataset}/tables/{table}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Required. The group of streams that will be committed atomically. + repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Response message for `BatchCommitWriteStreams`. +message BatchCommitWriteStreamsResponse { + // The time at which streams were committed in microseconds granularity. + // This field will only exist when there are no stream errors. + // **Note** if this field is not set, it means the commit was not successful. + google.protobuf.Timestamp commit_time = 1; + + // Stream level error if commit failed. Only streams with error will be in + // the list. + // If empty, there is no error and all streams are committed successfully. + // If non empty, certain streams have errors and ZERO stream is committed due + // to atomicity guarantee. + repeated StorageError stream_errors = 2; +} + +// Request message for invoking `FinalizeWriteStream`. +message FinalizeWriteStreamRequest { + // Required. Name of the stream to finalize, in the form of + // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigquerystorage.googleapis.com/WriteStream" + } + ]; +} + +// Response message for `FinalizeWriteStream`. +message FinalizeWriteStreamResponse { + // Number of rows in the finalized stream. + int64 row_count = 1; +} + +// Request message for `FlushRows`. +message FlushRowsRequest { + // Required. The stream that is the target of the flush operation. 
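+  //
+  // An illustrative sketch with the generated Java classes: flushing a
+  // BUFFERED stream through offset 100 (the stream name is hypothetical):
+  //
+  //   FlushRowsRequest request =
+  //       FlushRowsRequest.newBuilder()
+  //           .setWriteStream(streamName)
+  //           .setOffset(com.google.protobuf.Int64Value.of(100))
+  //           .build();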
+  string write_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+
+  // Ending offset of the flush operation. Rows before this offset (including
+  // this offset) will be flushed.
+  google.protobuf.Int64Value offset = 2;
+}
+
+// Response message for `FlushRows`.
+message FlushRowsResponse {
+  // The rows before this offset (including this offset) are flushed.
+  int64 offset = 1;
+}
+
+// Structured custom BigQuery Storage error message. The error can be attached
+// as error details in the returned rpc Status. In particular, the use of error
+// codes allows more structured error handling, and reduces the need to evaluate
+// unstructured error text strings.
+message StorageError {
+  // Error code for `StorageError`.
+  enum StorageErrorCode {
+    // Default error.
+    STORAGE_ERROR_CODE_UNSPECIFIED = 0;
+
+    // Table is not found in the system.
+    TABLE_NOT_FOUND = 1;
+
+    // Stream is already committed.
+    STREAM_ALREADY_COMMITTED = 2;
+
+    // Stream is not found.
+    STREAM_NOT_FOUND = 3;
+
+    // Invalid Stream type.
+    // For example, you try to commit a stream that is not pending.
+    INVALID_STREAM_TYPE = 4;
+
+    // Invalid Stream state.
+    // For example, you try to commit a stream that is not finalized or has
+    // been garbage collected.
+    INVALID_STREAM_STATE = 5;
+
+    // Stream is finalized.
+    STREAM_FINALIZED = 6;
+
+    // There is a schema mismatch, caused by the user schema having extra
+    // fields that the BigQuery schema does not have.
+    SCHEMA_MISMATCH_EXTRA_FIELDS = 7;
+
+    // Offset already exists.
+    OFFSET_ALREADY_EXISTS = 8;
+
+    // Offset out of range.
+    OFFSET_OUT_OF_RANGE = 9;
+
+    // Customer-managed encryption key (CMEK) not provided for CMEK-enabled
+    // data.
+    CMEK_NOT_PROVIDED = 10;
+
+    // Customer-managed encryption key (CMEK) was incorrectly provided.
+    INVALID_CMEK_PROVIDED = 11;
+
+    // There is an encryption error while using a customer-managed encryption
+    // key.
+    CMEK_ENCRYPTION_ERROR = 12;
+
+    // The Key Management Service (KMS) returned an error, which can be
+    // retried.
+    KMS_SERVICE_ERROR = 13;
+
+    // Permission denied while using a customer-managed encryption key.
+    KMS_PERMISSION_DENIED = 14;
+  }
+
+  // BigQuery Storage specific error code.
+  StorageErrorCode code = 1;
+
+  // Name of the failed entity.
+  string entity = 2;
+
+  // Message that describes the error.
+  string error_message = 3;
+}
+
+// The message that presents row-level error info in a request.
+message RowError {
+  // Error code for `RowError`.
+  enum RowErrorCode {
+    // Default error.
+    ROW_ERROR_CODE_UNSPECIFIED = 0;
+
+    // One or more fields in the row have errors.
+    FIELDS_ERROR = 1;
+  }
+
+  // Index of the malformed row in the request.
+  int64 index = 1;
+
+  // Structured error reason for a row error.
+  RowErrorCode code = 2;
+
+  // Description of the issue encountered when processing the row.
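+  //
+  // A handling sketch (illustrative): a writer inspecting an
+  // `AppendRowsResponse` for row-level problems can iterate the generated
+  // accessor and drop or repair the rows it names before retrying:
+  //
+  //   for (RowError rowError : response.getRowErrorsList()) {
+  //     // rowError.getIndex() identifies the bad row in the original batch.
+  //   }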
+ string message = 3; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto new file mode 100644 index 000000000000..f0d1dfef5c3e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto @@ -0,0 +1,335 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1/arrow.proto"; +import "google/cloud/bigquery/storage/v1/avro.proto"; +import "google/cloud/bigquery/storage/v1/table.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "StreamProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// Data format for input or output data. +enum DataFormat { + // Data format is unspecified. + DATA_FORMAT_UNSPECIFIED = 0; + + // Avro is a standard open source row based file format. + // See https://avro.apache.org/ for more details. + AVRO = 1; + + // Arrow is a standard open source column-based message format. + // See https://arrow.apache.org/ for more details. + ARROW = 2; +} + +// Information about the ReadSession. +message ReadSession { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadSession" + pattern: "projects/{project}/locations/{location}/sessions/{session}" + }; + + // Additional attributes when reading a table. + message TableModifiers { + // The snapshot time of the table. If not set, interpreted as now. + google.protobuf.Timestamp snapshot_time = 1; + } + + // Options dictating how we read a table. + message TableReadOptions { + // Specifies which compression codec to attempt on the entire serialized + // response payload (either Arrow record batch or Avro rows). This is + // not to be confused with the Apache Arrow native compression codecs + // specified in ArrowSerializationOptions. For performance reasons, when + // creating a read session requesting Arrow responses, setting both native + // Arrow compression and application-level response compression will not be + // allowed - choose, at most, one kind of compression. + enum ResponseCompressionCodec { + // Default is no compression. + RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0; + + // Use raw LZ4 compression. + RESPONSE_COMPRESSION_CODEC_LZ4 = 2; + } + + // Optional. The names of the fields in the table to be returned. If no + // field names are specified, then all fields in the table are returned. 
+    //
+    // Nested fields -- the child elements of a STRUCT field -- can be selected
+    // individually using their fully-qualified names, and will be returned as
+    // record fields containing only the selected nested fields. If a STRUCT
+    // field is specified in the selected fields list, all of the child elements
+    // will be returned.
+    //
+    // As an example, consider a table with the following schema:
+    //
+    //   {
+    //     "name": "struct_field",
+    //     "type": "RECORD",
+    //     "mode": "NULLABLE",
+    //     "fields": [
+    //       {
+    //         "name": "string_field1",
+    //         "type": "STRING",
+    //         "mode": "NULLABLE"
+    //       },
+    //       {
+    //         "name": "string_field2",
+    //         "type": "STRING",
+    //         "mode": "NULLABLE"
+    //       }
+    //     ]
+    //   }
+    //
+    // Specifying "struct_field" in the selected fields list will result in a
+    // read session schema with the following logical structure:
+    //
+    //   struct_field {
+    //     string_field1
+    //     string_field2
+    //   }
+    //
+    // Specifying "struct_field.string_field1" in the selected fields list will
+    // result in a read session schema with the following logical structure:
+    //
+    //   struct_field {
+    //     string_field1
+    //   }
+    //
+    // The order of the fields in the read session schema is derived from the
+    // table schema and does not correspond to the order in which the fields are
+    // specified in this list.
+    repeated string selected_fields = 1;
+
+    // SQL text filtering statement, similar to a WHERE clause in a query.
+    // Aggregates are not supported.
+    //
+    // Examples: "int_field > 5"
+    //           "date_field = CAST('2014-9-27' as DATE)"
+    //           "nullable_field is not NULL"
+    //           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+    //           "numeric_field BETWEEN 1.0 AND 5.0"
+    //
+    // Restricted to a maximum length of 1 MB.
+    string row_restriction = 2;
+
+    oneof output_format_serialization_options {
+      // Optional. Options specific to the Apache Arrow output format.
+      ArrowSerializationOptions arrow_serialization_options = 3
+          [(google.api.field_behavior) = OPTIONAL];
+
+      // Optional. Options specific to the Apache Avro output format.
+      AvroSerializationOptions avro_serialization_options = 4
+          [(google.api.field_behavior) = OPTIONAL];
+    }
+
+    // Optional. Specifies a table sampling percentage. Specifically, the query
+    // planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+    // sampling percentage is applied at the data block granularity. It will
+    // randomly choose for each data block whether to read the rows in that data
+    // block. For more details, see
+    // https://cloud.google.com/bigquery/docs/table-sampling
+    optional double sample_percentage = 5
+        [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Set response_compression_codec when creating a read session to
+    // enable application-level compression of ReadRows responses.
+    optional ResponseCompressionCodec response_compression_codec = 6
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Output only. Unique identifier for the session, in the form
+  // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time at which the session becomes invalid. After this time,
+  // subsequent requests to read this Session will return errors. The
+  // expire_time is automatically assigned and currently cannot be specified or
+  // updated.
+  google.protobuf.Timestamp expire_time = 2
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not
+  // supported.
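+  //
+  // A configuration sketch for the `TableReadOptions` above, using the
+  // generated Java classes (field and filter values are illustrative):
+  //
+  //   ReadSession.TableReadOptions options =
+  //       ReadSession.TableReadOptions.newBuilder()
+  //           .addSelectedFields("struct_field.string_field1")
+  //           .setRowRestriction("int_field > 5")
+  //           .build();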
+ DataFormat data_format = 3 [(google.api.field_behavior) = IMMUTABLE]; + + // The schema for the read. If read_options.selected_fields is set, the + // schema may be different from the table schema as it will only contain + // the selected fields. + oneof schema { + // Output only. Avro schema. + AvroSchema avro_schema = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Arrow schema. + ArrowSchema arrow_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Immutable. Table that this ReadSession is reading from, in the form + // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}` + string table = 6 [ + (google.api.field_behavior) = IMMUTABLE, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Optional. Any modifiers which are applied when reading from the specified + // table. + TableModifiers table_modifiers = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Read options for this session (e.g. column selection, filters). + TableReadOptions read_options = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. A list of streams created with the session. + // + // At least one stream is created with the session. In the future, larger + // request_stream_count values *may* result in this list being unpopulated, + // in that case, the user will need to use a List method to get the streams + // instead, which is not yet available. + repeated ReadStream streams = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. An estimate on the number of bytes this session will scan when + // all streams are completely consumed. This estimate is based on + // metadata from the table which might be incomplete or stale. + int64 estimated_total_bytes_scanned = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A pre-projected estimate of the total physical size of files + // (in bytes) that this session will scan when all streams are consumed. This + // estimate is independent of the selected columns and can be based on + // incomplete or stale metadata from the table. This field is only set for + // BigLake tables. + int64 estimated_total_physical_file_size = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. An estimate on the number of rows present in this session's + // streams. This estimate is based on metadata from the table which might be + // incomplete or stale. + int64 estimated_row_count = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. ID set by client to annotate a session identity. This does not + // need to be strictly unique, but instead the same ID should be used to group + // logically connected sessions (e.g. All using the same ID for all sessions + // needed to complete a Spark SQL query is reasonable). + // + // Maximum length is 256 bytes. + string trace_id = 13 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information about a single stream that gets data out of the storage system. +// Most of the information about `ReadStream` instances is aggregated, making +// `ReadStream` lightweight. +message ReadStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadStream" + pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}" + }; + + // Output only. Name of the stream, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. 
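+  //
+  // A consumption sketch (hypothetical, assuming the generated
+  // BigQueryReadClient-style GAPIC surface): each stream created with a
+  // session can be read independently, e.g.
+  //
+  //   for (ReadStream stream : session.getStreamsList()) {
+  //     client.readRowsCallable().call(
+  //         ReadRowsRequest.newBuilder().setReadStream(stream.getName()).build());
+  //   }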
+ string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// WriteStreamView is a view enum that controls what details about a write +// stream should be returned. +enum WriteStreamView { + // The default / unset value. + WRITE_STREAM_VIEW_UNSPECIFIED = 0; + + // The BASIC projection returns basic metadata about a write stream. The + // basic view does not include schema information. This is the default view + // returned by GetWriteStream. + BASIC = 1; + + // The FULL projection returns all available write stream metadata, including + // the schema. CreateWriteStream returns the full projection of write stream + // metadata. + FULL = 2; +} + +// Information about a single stream that gets data inside the storage system. +message WriteStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/WriteStream" + pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}" + }; + + // Type enum of the stream. + enum Type { + // Unknown type. + TYPE_UNSPECIFIED = 0; + + // Data will commit automatically and appear as soon as the write is + // acknowledged. + COMMITTED = 1; + + // Data is invisible until the stream is committed. + PENDING = 2; + + // Data is only visible up to the offset to which it was flushed. + BUFFERED = 3; + } + + // Mode enum of the stream. + enum WriteMode { + // Unknown type. + WRITE_MODE_UNSPECIFIED = 0; + + // Insert new records into the table. + // It is the default value if customers do not specify it. + INSERT = 1; + } + + // Output only. Name of the stream, in the form + // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Immutable. Type of the stream. + Type type = 2 [(google.api.field_behavior) = IMMUTABLE]; + + // Output only. Create time of the stream. For the _default stream, this is + // the creation_time of the table. + google.protobuf.Timestamp create_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Commit time of the stream. + // If a stream is of `COMMITTED` type, then it will have a commit_time same as + // `create_time`. If the stream is of `PENDING` type, empty commit_time + // means it is not committed. + google.protobuf.Timestamp commit_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The schema of the destination table. It is only returned in + // `CreateWriteStream` response. Caller should generate data that's + // compatible with this schema to send in initial `AppendRowsRequest`. + // The table schema could go out of date during the life time of the stream. + TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Immutable. Mode of the stream. + WriteMode write_mode = 7 [(google.api.field_behavior) = IMMUTABLE]; + + // Output only. The geographic location where the stream's dataset resides. + // See https://cloud.google.com/bigquery/docs/locations for supported + // locations. 
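+  //
+  // A view sketch with the generated Java classes (the stream name is
+  // hypothetical): requesting the FULL projection, including the table schema:
+  //
+  //   GetWriteStreamRequest request =
+  //       GetWriteStreamRequest.newBuilder()
+  //           .setName(streamName)
+  //           .setView(WriteStreamView.FULL)
+  //           .build();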
+ string location = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto new file mode 100644 index 000000000000..30c30228c27c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto @@ -0,0 +1,200 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// Schema of a table. This schema is a subset of +// google.cloud.bigquery.v2.TableSchema containing information necessary to +// generate valid message to write to BigQuery. +message TableSchema { + // Describes the fields in a table. + repeated TableFieldSchema fields = 1; +} + +// TableFieldSchema defines a single field/column within a table schema. +message TableFieldSchema { + enum Type { + // Illegal value + TYPE_UNSPECIFIED = 0; + + // 64K, UTF8 + STRING = 1; + + // 64-bit signed + INT64 = 2; + + // 64-bit IEEE floating point + DOUBLE = 3; + + // Aggregate type + STRUCT = 4; + + // 64K, Binary + BYTES = 5; + + // 2-valued + BOOL = 6; + + // 64-bit signed usec since UTC epoch + TIMESTAMP = 7; + + // Civil date - Year, Month, Day + DATE = 8; + + // Civil time - Hour, Minute, Second, Microseconds + TIME = 9; + + // Combination of civil date and civil time + DATETIME = 10; + + // Geography object + GEOGRAPHY = 11; + + // Numeric value + NUMERIC = 12; + + // BigNumeric value + BIGNUMERIC = 13; + + // Interval + INTERVAL = 14; + + // JSON, String + JSON = 15; + + // RANGE + RANGE = 16; + } + + enum Mode { + // Illegal value + MODE_UNSPECIFIED = 0; + + NULLABLE = 1; + + REQUIRED = 2; + + REPEATED = 3; + } + + // Represents the type of a field element. + message FieldElementType { + // Required. The type of a field element. + Type type = 1 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. The field name. The name must contain only letters (a-z, A-Z), + // numbers (0-9), or underscores (_), and must start with a letter or + // underscore. The maximum length is 128 characters. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The field data type. + Type type = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The field mode. The default value is NULLABLE. 
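+  //
+  // A construction sketch with the generated Java classes (names and types are
+  // illustrative):
+  //
+  //   TableFieldSchema field =
+  //       TableFieldSchema.newBuilder()
+  //           .setName("user_id")
+  //           .setType(TableFieldSchema.Type.INT64)
+  //           .setMode(TableFieldSchema.Mode.REQUIRED)
+  //           .build();
+  //   TableSchema schema = TableSchema.newBuilder().addFields(field).build();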
+ Mode mode = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Describes the nested schema fields if the type property is set to + // STRUCT. + repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The field description. The maximum length is 1,024 characters. + string description = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Maximum length of values of this field for STRINGS or BYTES. + // + // If max_length is not specified, no maximum length constraint is imposed + // on this field. + // + // If type = "STRING", then max_length represents the maximum UTF-8 + // length of strings in this field. + // + // If type = "BYTES", then max_length represents the maximum number of + // bytes in this field. + // + // It is invalid to set this field if type is not "STRING" or "BYTES". + int64 max_length = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Precision (maximum number of total digits in base 10) and scale + // (maximum number of digits in the fractional part in base 10) constraints + // for values of this field for NUMERIC or BIGNUMERIC. + // + // It is invalid to set precision or scale if type is not "NUMERIC" or + // "BIGNUMERIC". + // + // If precision and scale are not specified, no value range constraint is + // imposed on this field insofar as values are permitted by the type. + // + // Values of this NUMERIC or BIGNUMERIC field must be in this range when: + // + // * Precision (P) and scale (S) are specified: + // [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)] + // * Precision (P) is specified but not scale (and thus scale is + // interpreted to be equal to zero): + // [-10^P + 1, 10^P - 1]. + // + // Acceptable values for precision and scale if both are specified: + // + // * If type = "NUMERIC": + // 1 <= precision - scale <= 29 and 0 <= scale <= 9. + // * If type = "BIGNUMERIC": + // 1 <= precision - scale <= 38 and 0 <= scale <= 38. + // + // Acceptable values for precision if only precision is specified but not + // scale (and thus scale is interpreted to be equal to zero): + // + // * If type = "NUMERIC": 1 <= precision <= 29. + // * If type = "BIGNUMERIC": 1 <= precision <= 38. + // + // If scale is specified but not precision, then it is invalid. + int64 precision = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. See documentation for precision. + int64 scale = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A SQL expression to specify the [default value] + // (https://cloud.google.com/bigquery/docs/default-values) for this field. + string default_value_expression = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Precision (maximum number of total digits in base 10) for seconds + // of TIMESTAMP type. + // + // Possible values include: + // + // * 6 (Default, for TIMESTAMP type with microsecond precision) + // * 12 (For TIMESTAMP type with picosecond precision) + google.protobuf.Int64Value timestamp_precision = 27 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The subtype of the RANGE, if the type of this field is RANGE. If + // the type is RANGE, this field is required. 
Possible values for the field + // element type of a RANGE include: + // * DATE + // * DATETIME + // * TIMESTAMP + FieldElementType range_element_type = 11 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/clirr-ignored-differences.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/clirr-ignored-differences.xml new file mode 100644 index 000000000000..73d4983526f0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/clirr-ignored-differences.xml @@ -0,0 +1,81 @@ + + + + + 7012 + com/google/cloud/bigquery/storage/v1alpha/*OrBuilder + * get*(*) + + + 7012 + com/google/cloud/bigquery/storage/v1alpha/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/cloud/bigquery/storage/v1alpha/*OrBuilder + boolean has*(*) + + + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * addRepeatedField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * clear() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * clearField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * clearOneof(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * clone() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * setField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * setRepeatedField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1alpha/** + * setUnknownFields(*) + ** + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/pom.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/pom.xml new file mode 100644 index 000000000000..057a6ff6389d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/pom.xml @@ -0,0 +1,41 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha + 3.19.1 + proto-google-cloud-bigquerystorage-v1alpha + Proto library for google-cloud-bigquerystorage + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java new file mode 100644 index 000000000000..d326a5ffe73c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java @@ -0,0 +1,1623 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Request message for BatchCreateMetastorePartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest} + */ +public final class BatchCreateMetastorePartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + BatchCreateMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCreateMetastorePartitionsRequest.newBuilder() to construct. + private BatchCreateMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCreateMetastorePartitionsRequest() { + parent_ = ""; + requests_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCreateMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partitions are to
+   * be added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partitions are to
+   * be added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+   *
+   * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+   *
+   *
+   * @return The bytes for parent.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getParentBytes() {
+    java.lang.Object ref = parent_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      parent_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int REQUESTS_FIELD_NUMBER = 2;
+
+  @SuppressWarnings("serial")
+  private java.util.List<com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest>
+      requests_;
+
+  /**
+   *
+   *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+   *
+   * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   */
+  @java.lang.Override
+  public java.util.List<com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest>
+      getRequestsList() {
+    return requests_;
+  }
+
+  /**
+   *
+   *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getRequestsCount() { + return requests_.size(); + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest getRequests( + int index) { + return requests_.get(index); + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + return requests_.get(index); + } + + public static final int SKIP_EXISTING_PARTITIONS_FIELD_NUMBER = 3; + private boolean skipExistingPartitions_ = false; + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+   * the server will skip existing partitions and insert only the non-existing
+   * partitions. A maximum of 900 partitions can be inserted in a batch.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + public static final int TRACE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs to all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs to all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + output.writeMessage(2, requests_.get(i)); + } + if (skipExistingPartitions_ != false) { + output.writeBool(3, skipExistingPartitions_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, requests_.get(i)); + } + if (skipExistingPartitions_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, skipExistingPartitions_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getRequestsList().equals(other.getRequestsList())) return false; + if (getSkipExistingPartitions() != other.getSkipExistingPartitions()) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getRequestsCount() > 0) { + hash = (37 * hash) + REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getRequestsList().hashCode(); + } + hash = (37 * hash) + SKIP_EXISTING_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean(getSkipExistingPartitions()); + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchCreateMetastorePartitions.
+   * 
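+   *
+   * A minimal construction sketch (the resource name is hypothetical, not
+   * taken from this library's documentation):
+   *
+   *   BatchCreateMetastorePartitionsRequest request =
+   *       BatchCreateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(
+   *               "projects/my-project/locations/us/datasets/my_dataset/tables/my_table")
+   *           .addRequests(CreateMetastorePartitionRequest.getDefaultInstance())
+   *           .setSkipExistingPartitions(true)
+   *           .build();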
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + } else { + requests_ = null; + requestsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipExistingPartitions_ = false; + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest result) { + if (requestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + requests_ = java.util.Collections.unmodifiableList(requests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + 
result.requests_ = requests_; + } else { + result.requests_ = requestsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.skipExistingPartitions_ = skipExistingPartitions_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (requestsBuilder_ == null) { + if (!other.requests_.isEmpty()) { + if (requests_.isEmpty()) { + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRequestsIsMutable(); + requests_.addAll(other.requests_); + } + onChanged(); + } + } else { + if (!other.requests_.isEmpty()) { + if (requestsBuilder_.isEmpty()) { + requestsBuilder_.dispose(); + requestsBuilder_ = null; + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + requestsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getRequestsFieldBuilder() + : null; + } else { + requestsBuilder_.addAllMessages(other.requests_); + } + } + } + if (other.getSkipExistingPartitions() != false) { + setSkipExistingPartitions(other.getSkipExistingPartitions()); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + .parser(), + extensionRegistry); + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(m); + } else { + requestsBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + skipExistingPartitions_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table where the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
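+     *
+     * A concrete (hypothetical) reference in that format would be:
+     * projects/my-project/locations/us/datasets/my_dataset/tables/my_table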
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List< + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest> + requests_ = java.util.Collections.emptyList(); + + private void ensureRequestsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + requests_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest>( + requests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder> + requestsBuilder_; + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
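+     *
+     * A sketch of populating this field (the element shown is a bare default
+     * instance; real requests would carry partition data):
+     *
+     *   requestBuilder.addRequests(
+     *       CreateMetastorePartitionRequest.getDefaultInstance());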
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsList() { + if (requestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(requests_); + } else { + return requestsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getRequestsCount() { + if (requestsBuilder_ == null) { + return requests_.size(); + } else { + return requestsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest getRequests( + int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.set(index, value); + onChanged(); + } else { + requestsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.set(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(value); + onChanged(); + } else { + requestsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(index, value); + onChanged(); + } else { + requestsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllRequests( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest> + values) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, requests_); + onChanged(); + } else { + requestsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearRequests() { + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + requestsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeRequests(int index) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.remove(index); + onChanged(); + } else { + requestsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + getRequestsBuilder(int index) { + return getRequestsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + if (requestsBuilder_ != null) { + return requestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(requests_); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + addRequestsBuilder() { + return getRequestsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + addRequestsBuilder(int index) { + return getRequestsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder> + getRequestsBuilderList() { + return getRequestsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder> + getRequestsFieldBuilder() { + if (requestsBuilder_ == null) { + requestsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder>( + requests_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + requests_ = null; + } + return requestsBuilder_; + } + + private boolean skipExistingPartitions_; + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+     * the server will skip existing partitions and insert only the non-existing
+     * partitions. A maximum of 900 partitions can be inserted in a batch.
+     * 
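+     *
+     * For example, a retried batch can tolerate partitions created by an
+     * earlier attempt (a sketch; `requestBuilder` is hypothetical):
+     *
+     *   requestBuilder.setSkipExistingPartitions(true);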
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+     * the server will skip existing partitions and insert only the non-existing
+     * partitions. A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The skipExistingPartitions to set. + * @return This builder for chaining. + */ + public Builder setSkipExistingPartitions(boolean value) { + + skipExistingPartitions_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+     * the server will skip existing partitions and insert only the non-existing
+     * partitions. A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSkipExistingPartitions() { + bitField0_ = (bitField0_ & ~0x00000004); + skipExistingPartitions_ = false; + onChanged(); + return this; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs for all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
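+     *
+     * For example, a client splitting one logical operation across several
+     * batches might use (a sketch; the value is hypothetical):
+     *
+     *   requestBuilder.setTraceId("load-operation-2f9c");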
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs for all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs for all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs for all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs for all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha + .BatchCreateMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..bf0d2edfcf2a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,181 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface BatchCreateMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table where the metastore partitions are to be
+   * added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table where the metastore partitions are to be
+   * added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getRequestsList(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest getRequests(int index); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getRequestsCount(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index); + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+   * the server will skip existing partitions and insert only the non-existing
+   * partitions. A maximum of 900 partitions can be inserted in a batch.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + boolean getSkipExistingPartitions(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs for all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs for all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java new file mode 100644 index 000000000000..aaf9aa1d52f9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java @@ -0,0 +1,1035 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Response message for BatchCreateMetastorePartitions.
+ * 
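+ *
+ * A minimal read sketch (the `response` variable is hypothetical):
+ *
+ *   for (MetastorePartition partition : response.getPartitionsList()) {
+ *     // each entry is a partition the service reports as created
+ *   }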
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse} + */ +public final class BatchCreateMetastorePartitionsResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) + BatchCreateMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCreateMetastorePartitionsResponse.newBuilder() to construct. + private BatchCreateMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCreateMetastorePartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCreateMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse.Builder + .class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BatchCreateMetastorePartitions.
+   * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + .class, + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + build() { + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse( + this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } 
else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + .getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.parser(), + extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + partitions_ = java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList( + partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
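+     * Illustrative note (not part of the generated documentation): when no
+     * field builder is in use, the returned list is an unmodifiable view of
+     * the builder's current contents.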
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addAllPartitions( + java.lang.Iterable + values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + getPartitionsBuilder(int index) { + return getPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addPartitionsBuilder() { + return getPartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addPartitionsBuilder(int index) { + return getPartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsBuilderList() { + return getPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1alpha + .BatchCreateMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..8833e4756465 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface BatchCreateMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
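+   * Illustrative note: this accessor is available on both the message and its
+   * Builder, since both implement this interface.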
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder getPartitionsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java new file mode 100644 index 000000000000..e085da0346e1 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java @@ -0,0 +1,1534 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Request message for BatchDeleteMetastorePartitions. A MetastorePartition is
+ * uniquely identified by its values, which form an ordered list. Hence, there
+ * is no separate name or partition id field.
+ * 
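+ * Illustrative example (hypothetical partition scheme): a partition keyed on
+ * (year, month) would be identified by the ordered value list ["2024", "06"]
+ * rather than by a name or id.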
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest} + */ +public final class BatchDeleteMetastorePartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + BatchDeleteMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchDeleteMetastorePartitionsRequest.newBuilder() to construct. + private BatchDeleteMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchDeleteMetastorePartitionsRequest() { + parent_ = ""; + partitionValues_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchDeleteMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
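+   * Illustrative value (hypothetical project and table names):
+   * projects/my-project/locations/us/datasets/my_dataset/tables/my_table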
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + partitionValues_; + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
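+   * Illustrative arithmetic: deleting 2,000 partitions therefore requires at
+   * least ceil(2000 / 900) = 3 batched requests.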
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getPartitionValuesList() { + return partitionValues_; + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder> + getPartitionValuesOrBuilderList() { + return partitionValues_; + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getPartitionValuesCount() { + return partitionValues_.size(); + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues getPartitionValues( + int index) { + return partitionValues_.get(index); + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index) { + return partitionValues_.get(index); + } + + public static final int TRACE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. A trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all of those batches can be tied together. This id is
+   * expected, but not required, to be globally unique.
+   * 
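+   * Illustrative choice (an assumption, not a requirement of the API): a
+   * random UUID generated once per logical operation and reused for each of
+   * its batches satisfies these expectations.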
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all of those batches can be tied together. This id is
+   * expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < partitionValues_.size(); i++) { + output.writeMessage(2, partitionValues_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < partitionValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, partitionValues_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getPartitionValuesList().equals(other.getPartitionValuesList())) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getPartitionValuesCount() > 0) { + hash = (37 * hash) + PARTITION_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getPartitionValuesList().hashCode(); + } + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + 
public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchDeleteMetastorePartitions. A MetastorePartition is
+   * uniquely identified by its values, which form an ordered list. Hence,
+   * there is no separate name or partition id field.
+   * 
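+   * Illustrative usage sketch (hypothetical resource name and partition
+   * values; assumes MetastorePartitionValues exposes addValues for its
+   * repeated string field):
+   * BatchDeleteMetastorePartitionsRequest.newBuilder()
+   *     .setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table")
+   *     .addPartitionValues(
+   *         MetastorePartitionValues.newBuilder().addValues("2024").addValues("06"))
+   *     .build();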
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (partitionValuesBuilder_ == null) { + partitionValues_ = java.util.Collections.emptyList(); + } else { + partitionValues_ = null; + partitionValuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest result) { + if (partitionValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + partitionValues_ = java.util.Collections.unmodifiableList(partitionValues_); + bitField0_ = (bitField0_ & ~0x00000002); 
+ } + result.partitionValues_ = partitionValues_; + } else { + result.partitionValues_ = partitionValuesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (partitionValuesBuilder_ == null) { + if (!other.partitionValues_.isEmpty()) { + if (partitionValues_.isEmpty()) { + partitionValues_ = other.partitionValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePartitionValuesIsMutable(); + partitionValues_.addAll(other.partitionValues_); + } + onChanged(); + } + } else { + if (!other.partitionValues_.isEmpty()) { + if (partitionValuesBuilder_.isEmpty()) { + partitionValuesBuilder_.dispose(); + partitionValuesBuilder_ = null; + partitionValues_ = other.partitionValues_; + bitField0_ = (bitField0_ & ~0x00000002); + partitionValuesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionValuesFieldBuilder() + : null; + } else { + partitionValuesBuilder_.addAllMessages(other.partitionValues_); + } + } + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.parser(), + extensionRegistry); + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(m); + } else { + partitionValuesBuilder_.addMessage(m); + } + break; + } // case 18 + case 34: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List + partitionValues_ = java.util.Collections.emptyList(); + + private void ensurePartitionValuesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + partitionValues_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues>( + partitionValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder> + partitionValuesBuilder_; + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionValuesList() { + if (partitionValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitionValues_); + } else { + return partitionValuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getPartitionValuesCount() { + if (partitionValuesBuilder_ == null) { + return partitionValues_.size(); + } else { + return partitionValuesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues getPartitionValues( + int index) { + if (partitionValuesBuilder_ == null) { + return partitionValues_.get(index); + } else { + return partitionValuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitionValues( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.set(index, value); + onChanged(); + } else { + partitionValuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitionValues( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder + builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.add(value); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.add(index, value); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder + builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder + builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllPartitionValues( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues> + values) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitionValues_); + onChanged(); + } else { + partitionValuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartitionValues() { + if (partitionValuesBuilder_ == null) { + partitionValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + partitionValuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removePartitionValues(int index) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.remove(index); + onChanged(); + } else { + partitionValuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder + getPartitionValuesBuilder(int index) { + return getPartitionValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index) { + if (partitionValuesBuilder_ == null) { + return partitionValues_.get(index); + } else { + return partitionValuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder> + getPartitionValuesOrBuilderList() { + if (partitionValuesBuilder_ != null) { + return partitionValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitionValues_); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder + addPartitionValuesBuilder() { + return getPartitionValuesFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder + addPartitionValuesBuilder(int index) { + return getPartitionValuesFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder> + getPartitionValuesBuilderList() { + return getPartitionValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder> + getPartitionValuesFieldBuilder() { + if (partitionValuesBuilder_ == null) { + partitionValuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder>( + partitionValues_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + partitionValues_ = null; + } + return partitionValuesBuilder_; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. A trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all of those batches can be tied together. This id is
+     * expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all of those batches can be tied together. This id is
+     * expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all those batches can be tied together. This id is
+     * expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all those batches can be tied together. This id is
+     * expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all those batches can be tied together. This id is
+     * expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha + .BatchDeleteMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchDeleteMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..e16d0c3a73e2 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface BatchDeleteMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.BatchDeleteMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getPartitionValuesList(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues getPartitionValues(int index); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getPartitionValuesCount(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder> + getPartitionValuesOrBuilderList(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all those batches can be tied together. This id is
+   * expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all those batches can be tied together. This id is
+   * expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java new file mode 100644 index 000000000000..4ef0e01049fe --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java @@ -0,0 +1,747 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Structured custom error message for a batch-size-too-large error.
+ * The error can be attached as error details in the returned rpc Status for
+ * more structured error handling in the client.
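+ *
+ * A minimal client-side sketch (assuming the io.grpc.protobuf.StatusProto
+ * utility; variable names are hypothetical) of recovering this detail from a
+ * failed call and using it to retry with smaller batches:
+ *
+ *   com.google.rpc.Status status = StatusProto.fromThrowable(throwable);
+ *   for (com.google.protobuf.Any detail : status.getDetailsList()) {
+ *     if (detail.is(BatchSizeTooLargeError.class)) {
+ *       // unpack(...) declares InvalidProtocolBufferException
+ *       long maxBatchSize =
+ *           detail.unpack(BatchSizeTooLargeError.class).getMaxBatchSize();
+ *       // re-chunk the request so each batch has at most maxBatchSize items
+ *     }
+ *   }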
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError} + */ +public final class BatchSizeTooLargeError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) + BatchSizeTooLargeErrorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchSizeTooLargeError.newBuilder() to construct. + private BatchSizeTooLargeError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchSizeTooLargeError() { + errorMessage_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchSizeTooLargeError(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.class, + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.Builder.class); + } + + public static final int MAX_BATCH_SIZE_FIELD_NUMBER = 1; + private long maxBatchSize_ = 0L; + + /** + * + * + *
+   * The maximum number of items that are supported in a single batch. This is
+   * returned as a hint to the client to adjust the batch size.
+   * 
+ * + * int64 max_batch_size = 1; + * + * @return The maxBatchSize. + */ + @java.lang.Override + public long getMaxBatchSize() { + return maxBatchSize_; + } + + public static final int ERROR_MESSAGE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The errorMessage. + */ + @java.lang.Override + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for errorMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (maxBatchSize_ != 0L) { + output.writeInt64(1, maxBatchSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, errorMessage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (maxBatchSize_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, maxBatchSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, errorMessage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError other = + (com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) obj; + + if (getMaxBatchSize() != other.getMaxBatchSize()) return false; + if (!getErrorMessage().equals(other.getErrorMessage())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAX_BATCH_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxBatchSize()); + hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getErrorMessage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Structured custom error message for a batch-size-too-large error.
+   * The error can be attached as error details in the returned rpc Status for
+   * more structured error handling in the client.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.class, + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + maxBatchSize_ = 0L; + errorMessage_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError build() { + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError result = + new com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.maxBatchSize_ = maxBatchSize_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.errorMessage_ = errorMessage_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + 
return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError.getDefaultInstance()) + return this; + if (other.getMaxBatchSize() != 0L) { + setMaxBatchSize(other.getMaxBatchSize()); + } + if (!other.getErrorMessage().isEmpty()) { + errorMessage_ = other.errorMessage_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + maxBatchSize_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + errorMessage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long maxBatchSize_; + + /** + * + * + *
+     * The maximum number of items that are supported in a single batch. This is
+     * returned as a hint to the client to adjust the batch size.
+     * 
+ * + * int64 max_batch_size = 1; + * + * @return The maxBatchSize. + */ + @java.lang.Override + public long getMaxBatchSize() { + return maxBatchSize_; + } + + /** + * + * + *
+     * The maximum number of items that are supported in a single batch. This is
+     * returned as a hint to the client to adjust the batch size.
+     * 
+ * + * int64 max_batch_size = 1; + * + * @param value The maxBatchSize to set. + * @return This builder for chaining. + */ + public Builder setMaxBatchSize(long value) { + + maxBatchSize_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The maximum number of items that are supported in a single batch. This is
+     * returned as a hint to the client to adjust the batch size.
+     * 
+ * + * int64 max_batch_size = 1; + * + * @return This builder for chaining. + */ + public Builder clearMaxBatchSize() { + bitField0_ = (bitField0_ & ~0x00000001); + maxBatchSize_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The errorMessage. + */ + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for errorMessage. + */ + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + errorMessage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearErrorMessage() { + errorMessage_ = getDefaultInstance().getErrorMessage(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + errorMessage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) + private static final com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchSizeTooLargeError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java new file mode 100644 index 000000000000..b46f09f25338 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java @@ -0,0 +1,66 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface BatchSizeTooLargeErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.BatchSizeTooLargeError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The maximum number of items that are supported in a single batch. This is
+   * returned as a hint to the client to adjust the batch size.
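+   *
+   * A minimal sketch of applying the hint (hypothetical variables):
+   *
+   *   int newBatchSize = (int) Math.min(requestedBatchSize, error.getMaxBatchSize());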
+   * 
+ * + * int64 max_batch_size = 1; + * + * @return The maxBatchSize. + */ + long getMaxBatchSize(); + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The errorMessage. + */ + java.lang.String getErrorMessage(); + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for errorMessage. + */ + com.google.protobuf.ByteString getErrorMessageBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java new file mode 100644 index 000000000000..b6a7cd10efe8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java @@ -0,0 +1,1510 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Request message for BatchUpdateMetastorePartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest} + */ +public final class BatchUpdateMetastorePartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + BatchUpdateMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchUpdateMetastorePartitionsRequest.newBuilder() to construct. + private BatchUpdateMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchUpdateMetastorePartitionsRequest() { + parent_ = ""; + requests_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchUpdateMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
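+   *
+   * A minimal sketch (placeholder values) of assembling the parent string in
+   * the documented format:
+   *
+   *   String parent = String.format(
+   *       "projects/%s/locations/%s/datasets/%s/tables/%s",
+   *       "my-project", "us-central1", "my_dataset", "my_table");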
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUESTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + requests_; + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
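+   *
+   * A minimal sketch (hypothetical updateRequests list) of populating this
+   * field through the generated builder:
+   *
+   *   BatchUpdateMetastorePartitionsRequest request =
+   *       BatchUpdateMetastorePartitionsRequest.newBuilder()
+   *           .setParent(parent)
+   *           .addAllRequests(updateRequests)
+   *           .build();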
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getRequestsList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getRequestsCount() { + return requests_.size(); + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest getRequests( + int index) { + return requests_.get(index); + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + return requests_.get(index); + } + + public static final int TRACE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all those batches can be tied together. This id is
+   * expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all those batches can be tied together. This id is
+   * expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + output.writeMessage(2, requests_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, requests_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getRequestsList().equals(other.getRequestsList())) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getRequestsCount() > 0) { + hash = (37 * hash) + REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getRequestsList().hashCode(); + } + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchUpdateMetastorePartitions.
+   * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + } else { + requests_ = null; + requestsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest result) { + if (requestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + requests_ = java.util.Collections.unmodifiableList(requests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.requests_ = requests_; + } else { + 
result.requests_ = requestsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (requestsBuilder_ == null) { + if (!other.requests_.isEmpty()) { + if (requests_.isEmpty()) { + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRequestsIsMutable(); + requests_.addAll(other.requests_); + } + onChanged(); + } + } else { + if (!other.requests_.isEmpty()) { + if (requestsBuilder_.isEmpty()) { + requestsBuilder_.dispose(); + requestsBuilder_ = null; + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + requestsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getRequestsFieldBuilder() + : null; + } else { + requestsBuilder_.addAllMessages(other.requests_); + } + } + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + .parser(), + extensionRegistry); + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(m); + } else { + requestsBuilder_.addMessage(m); + } + break; + } // case 18 + case 34: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List< + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest> + requests_ = java.util.Collections.emptyList(); + + private void ensureRequestsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + requests_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest>( + requests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder> + requestsBuilder_; + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsList() { + if (requestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(requests_); + } else { + return requestsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getRequestsCount() { + if (requestsBuilder_ == null) { + return requests_.size(); + } else { + return requestsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest getRequests( + int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.set(index, value); + onChanged(); + } else { + requestsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.set(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(value); + onChanged(); + } else { + requestsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(index, value); + onChanged(); + } else { + requestsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllRequests( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest> + values) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, requests_); + onChanged(); + } else { + requestsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearRequests() { + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + requestsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeRequests(int index) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.remove(index); + onChanged(); + } else { + requestsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + getRequestsBuilder(int index) { + return getRequestsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + if (requestsBuilder_ != null) { + return requestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(requests_); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + addRequestsBuilder() { + return getRequestsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + addRequestsBuilder(int index) { + return getRequestsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder> + getRequestsBuilderList() { + return getRequestsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder> + getRequestsFieldBuilder() { + if (requestsBuilder_ == null) { + requestsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder>( + requests_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + requests_ = null; + } + return requestsBuilder_; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the same operation can be tied
+     * together. This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the same operation can be tied
+     * together. This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the same operation can be tied
+     * together. This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the same operation can be tied
+     * together. This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the same operation can be tied
+     * together. This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha + .BatchUpdateMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchUpdateMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..bf6df560ce70 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface BatchUpdateMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getRequestsList(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest getRequests(int index); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getRequestsCount(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all the batches in the same operation can be tied
+   * together. This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all the batches in the same operation can be tied
+   * together. This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java new file mode 100644 index 000000000000..7d7b06233c64 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java @@ -0,0 +1,1058 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Response message for BatchUpdateMetastorePartitions.
+ * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse} + */ +public final class BatchUpdateMetastorePartitionsResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) + BatchUpdateMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchUpdateMetastorePartitionsResponse.newBuilder() to construct. + private BatchUpdateMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchUpdateMetastorePartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchUpdateMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse.Builder + .class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BatchUpdateMetastorePartitions.
+   * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + .class, + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + build() { + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse( + this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } 
else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + .getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.parser(), + extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + partitions_ = java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList( + partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder addAllPartitions( + java.lang.Iterable + values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + getPartitionsBuilder(int index) { + return getPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addPartitionsBuilder() { + return getPartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addPartitionsBuilder(int index) { + return getPartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsBuilderList() { + return getPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1alpha + .BatchUpdateMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchUpdateMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..89418ed69c6e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,88 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface BatchUpdateMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.BatchUpdateMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder getPartitionsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java new file mode 100644 index 000000000000..373368a52802 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java @@ -0,0 +1,1013 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Request message for CreateMetastorePartition. The MetastorePartition is
+ * uniquely identified by its values, which form an ordered list; hence, there
+ * is no separate name or partition ID field.
+ * 
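+ *
+ * Editorial note, not generator output: a minimal sketch of assembling this
+ * request with the generated builder; the parent path and the default-instance
+ * partition are hypothetical placeholders.
+ * <pre>{@code
+ * CreateMetastorePartitionRequest request =
+ *     CreateMetastorePartitionRequest.newBuilder()
+ *         .setParent("projects/my-project/databases/my-database/tables/my-table")
+ *         .setMetastorePartition(MetastorePartition.getDefaultInstance())
+ *         .build();
+ * }</pre>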
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest} + */ +public final class CreateMetastorePartitionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) + CreateMetastorePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateMetastorePartitionRequest.newBuilder() to construct. + private CreateMetastorePartitionRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateMetastorePartitionRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateMetastorePartitionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + .class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partition is to be
+   * added, in the format
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
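+   *
+   * Editorial note, not generator output: a sketch of composing the parent
+   * path from its components; the identifiers are hypothetical.
+   * <pre>{@code
+   * String parent =
+   *     String.format(
+   *         "projects/%s/databases/%s/tables/%s",
+   *         "my-project", "my-database", "my-table");
+   * }</pre>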
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partition is to be
+   * added, in the format
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METASTORE_PARTITION_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1alpha.MetastorePartition metastorePartition_; + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
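+   *
+   * Editorial note, not generator output: because this is a message-typed
+   * field, presence can be checked before reading it; a minimal sketch with a
+   * hypothetical request value.
+   * <pre>{@code
+   * CreateMetastorePartitionRequest request = ...; // received from a caller
+   * if (request.hasMetastorePartition()) {
+   *   MetastorePartition partition = request.getMetastorePartition();
+   * }
+   * }</pre>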
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + @java.lang.Override + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartition() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getMetastorePartition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getMetastorePartition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest other = + (com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasMetastorePartition() != other.hasMetastorePartition()) return false; + if (hasMetastorePartition()) { + if (!getMetastorePartition().equals(other.getMetastorePartition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasMetastorePartition()) { + hash = (37 * hash) + METASTORE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getMetastorePartition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for CreateMetastorePartition. The MetastorePartition is
+   * uniquely identified by its values, which form an ordered list; hence, there
+   * is no separate name or partition ID field.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getMetastorePartitionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest build() { + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest result = + new com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + 
result.metastorePartition_ = + metastorePartitionBuilder_ == null + ? metastorePartition_ + : metastorePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasMetastorePartition()) { + mergeMetastorePartition(other.getMetastorePartition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + getMetastorePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partition is to be
+     * added, in the format
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partition is to be
+     * added, in the format
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partition is to be
+     * added, in the format
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partition is to be
+     * added, in the format
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partition is to be
+     * added, in the format
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1alpha.MetastorePartition metastorePartition_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + metastorePartitionBuilder_; + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartition() { + if (metastorePartitionBuilder_ == null) { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } else { + return metastorePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metastorePartition_ = value; + } else { + metastorePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (metastorePartitionBuilder_ == null) { + metastorePartition_ = builderForValue.build(); + } else { + metastorePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeMetastorePartition( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && metastorePartition_ != null + && metastorePartition_ + != com.google.cloud.bigquery.storage.v1alpha.MetastorePartition + .getDefaultInstance()) { + getMetastorePartitionBuilder().mergeFrom(value); + } else { + metastorePartition_ = value; + } + } else { + metastorePartitionBuilder_.mergeFrom(value); + } + if (metastorePartition_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearMetastorePartition() { + bitField0_ = (bitField0_ & ~0x00000002); + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + getMetastorePartitionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getMetastorePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + if (metastorePartitionBuilder_ != null) { + return metastorePartitionBuilder_.getMessageOrBuilder(); + } else { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getMetastorePartitionFieldBuilder() { + if (metastorePartitionBuilder_ == null) { + metastorePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder>( + getMetastorePartition(), getParentForChildren(), isClean()); + metastorePartition_ = null; + } + return metastorePartitionBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) + private static final com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateMetastorePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java new file mode 100644 index 
000000000000..8512d70a1130 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface CreateMetastorePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partition is to be
+   * added, in the format
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partition is to be
+   * added, in the format
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + boolean hasMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java new file mode 100644 index 000000000000..ed5c2ab4f61e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java @@ -0,0 +1,838 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Schema description of a metastore partition column.
+ * 
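+ *
+ * Editorial note, not generator output: a minimal sketch of describing one
+ * partition column; the name and type values are hypothetical.
+ * <pre>{@code
+ * FieldSchema column =
+ *     FieldSchema.newBuilder()
+ *         .setName("region") // at most 1024 characters
+ *         .setType("STRING") // at most 1024 characters
+ *         .build();
+ * }</pre>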
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.FieldSchema} + */ +public final class FieldSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.FieldSchema) + FieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FieldSchema.newBuilder() to construct. + private FieldSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FieldSchema() { + name_ = ""; + type_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FieldSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.FieldSchema.class, + com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
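+   *
+   * Editorial note, not generator output: the generated accessors do not
+   * enforce the documented 1024-character limit, so a caller may validate
+   * first; a sketch with a hypothetical candidate value.
+   * <pre>{@code
+   * String candidate = ...; // proposed column name
+   * if (candidate.length() > 1024) {
+   *   throw new IllegalArgumentException("column name exceeds 1024 characters");
+   * }
+   * }</pre>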
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, type_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, type_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.FieldSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.FieldSchema other = + (com.google.cloud.bigquery.storage.v1alpha.FieldSchema) obj; + + if (!getName().equals(other.getName())) return false; + if (!getType().equals(other.getType())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.FieldSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Schema description of a metastore partition column.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.FieldSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.FieldSchema) + com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.FieldSchema.class, + com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.FieldSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.FieldSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema build() { + com.google.cloud.bigquery.storage.v1alpha.FieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.FieldSchema result = + new com.google.cloud.bigquery.storage.v1alpha.FieldSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1alpha.FieldSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + 
@java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.FieldSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.FieldSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha.FieldSchema other) { + if (other == com.google.cloud.bigquery.storage.v1alpha.FieldSchema.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object type_ = ""; + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.FieldSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.FieldSchema) + private static final com.google.cloud.bigquery.storage.v1alpha.FieldSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.FieldSchema(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.FieldSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java new file mode 100644 index 000000000000..1a95fe04c706 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java @@ -0,0 +1,82 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface FieldSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.FieldSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java new file mode 100644 index 000000000000..9d72cb0c210d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java @@ -0,0 +1,1149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Request message for ListMetastorePartitions.
+ * 
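+ *
+ * <p>A minimal usage sketch (illustrative only; the resource name and filter
+ * below are hypothetical):
+ *
+ * <pre>{@code
+ * ListMetastorePartitionsRequest request =
+ *     ListMetastorePartitionsRequest.newBuilder()
+ *         .setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table")
+ *         .setFilter("int_field > 5")
+ *         .build();
+ * }</pre>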
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest} + */ +public final class ListMetastorePartitionsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) + ListMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ListMetastorePartitionsRequest.newBuilder() to construct. + private ListMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListMetastorePartitionsRequest() { + parent_ = ""; + filter_ = ""; + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples: "int_field > 5"
+   *           "date_field = CAST('2014-9-27' as DATE)"
+   *           "nullable_field is not NULL"
+   *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+   *           "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples: "int_field > 5"
+   *           "date_field = CAST('2014-9-27' as DATE)"
+   *           "nullable_field is not NULL"
+   *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+   *           "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRACE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. Trace ID to be used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that the
+   * logs for all those batches can be tied together. Limited to 256 characters.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Trace ID to be used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that the
+   * logs for all those batches can be tied together. Limited to 256 characters.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, filter_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, filter_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + 
public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ListMetastorePartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. Trace ID to be used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that the
+     * logs for all those batches can be tied together. Limited to 256 characters.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Trace ID to be used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that the
+     * logs for all those batches can be tied together. Limited to 256 characters.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Trace ID to be used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that the
+     * logs for all those batches can be tied together. Limited to 256 characters.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace ID to be used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that the
+     * logs for all those batches can be tied together. Limited to 256 characters.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace ID to be used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that the
+     * logs for all those batches can be tied together. Limited to 256 characters.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..481e4ab42db6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,138 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface ListMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples: "int_field > 5"
+   *           "date_field = CAST('2014-9-27' as DATE)"
+   *           "nullable_field is not NULL"
+   *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+   *           "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples: "int_field > 5"
+   *           "date_field = CAST('2014-9-27' as DATE)"
+   *           "nullable_field is not NULL"
+   *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+   *           "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
+   * Optional. Trace ID to be used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that the
+   * logs for all those batches can be tied together. Limited to 256 characters.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace ID to be used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that the
+   * logs for all those batches can be tied together. Limited to 256 characters.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java new file mode 100644 index 000000000000..10822cbf876f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java @@ -0,0 +1,1173 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Response message for ListMetastorePartitions.
+ * 
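+ *
+ * <p>The response carries a oneof: at most one of {@code partitions} or
+ * {@code streams} is set. A minimal sketch of how a caller might branch on it
+ * (illustrative only):
+ *
+ * <pre>{@code
+ * switch (response.getResponseCase()) {
+ *   case PARTITIONS:
+ *     MetastorePartitionList partitions = response.getPartitions();
+ *     break;
+ *   case STREAMS:
+ *     StreamList streams = response.getStreams();
+ *     break;
+ *   default: // RESPONSE_NOT_SET
+ *     break;
+ * }
+ * }</pre>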
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse} + */ +public final class ListMetastorePartitionsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) + ListMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ListMetastorePartitionsResponse.newBuilder() to construct. + private ListMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListMetastorePartitionsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.Builder + .class); + } + + private int responseCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object response_; + + public enum ResponseCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PARTITIONS(1), + STREAMS(2), + RESPONSE_NOT_SET(0); + private final int value; + + private ResponseCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ResponseCase valueOf(int value) { + return forNumber(value); + } + + public static ResponseCase forNumber(int value) { + switch (value) { + case 1: + return PARTITIONS; + case 2: + return STREAMS; + case 0: + return RESPONSE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + * + * @return Whether the partitions field is set. + */ + @java.lang.Override + public boolean hasPartitions() { + return responseCase_ == 1; + } + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + * + * @return The partitions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList getPartitions() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.getDefaultInstance(); + } + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder + getPartitionsOrBuilder() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.getDefaultInstance(); + } + + public static final int STREAMS_FIELD_NUMBER = 2; + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + * + * @return Whether the streams field is set. + */ + @java.lang.Override + public boolean hasStreams() { + return responseCase_ == 2; + } + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + * + * @return The streams. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamList getStreams() { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder getStreamsOrBuilder() { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (responseCase_ == 1) { + output.writeMessage( + 1, (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_); + } + if (responseCase_ == 2) { + output.writeMessage(2, (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (responseCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_); + } + if (responseCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) obj; + + if (!getResponseCase().equals(other.getResponseCase())) return false; + switch (responseCase_) { + case 1: + if (!getPartitions().equals(other.getPartitions())) return false; + break; + case 2: + if (!getStreams().equals(other.getStreams())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (responseCase_) { + case 1: + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitions().hashCode(); + break; + case 2: + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreams().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for ListMetastorePartitions.
+   * 
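+   *
+   * Because partitions and streams share the response oneof, the last setter
+   * invoked on the builder wins; a hedged construction sketch:
+   *
+   *   com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse response =
+   *       com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.newBuilder()
+   *           .setStreams(com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance())
+   *           .build();
+   *   // response.getResponseCase() is ResponseCase.STREAMS; hasPartitions() is false.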
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ != null) { + partitionsBuilder_.clear(); + } + if (streamsBuilder_ != null) { + streamsBuilder_.clear(); + } + responseCase_ = 0; + response_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse build() { + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse result) { + result.responseCase_ = responseCase_; + result.response_ = this.response_; + if (responseCase_ == 1 && partitionsBuilder_ != null) { + result.response_ = partitionsBuilder_.build(); + } + if (responseCase_ == 2 && streamsBuilder_ != null) { + 
result.response_ = streamsBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + .getDefaultInstance()) return this; + switch (other.getResponseCase()) { + case PARTITIONS: + { + mergePartitions(other.getPartitions()); + break; + } + case STREAMS: + { + mergeStreams(other.getStreams()); + break; + } + case RESPONSE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getPartitionsFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage(getStreamsFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 2; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public Builder clearResponse() { + responseCase_ = 0; + response_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder, + 
com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + * + * @return Whether the partitions field is set. + */ + @java.lang.Override + public boolean hasPartitions() { + return responseCase_ == 1; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + * + * @return The partitions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList getPartitions() { + if (partitionsBuilder_ == null) { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + .getDefaultInstance(); + } else { + if (responseCase_ == 1) { + return partitionsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + public Builder setPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + partitionsBuilder_.setMessage(value); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + public Builder setPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder builderForValue) { + if (partitionsBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + partitionsBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + public Builder mergePartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList value) { + if (partitionsBuilder_ == null) { + if (responseCase_ == 1 + && response_ + != com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + .getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.newBuilder( + (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 1) { + partitionsBuilder_.mergeFrom(value); + } else { + partitionsBuilder_.setMessage(value); + } + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + } + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder + getPartitionsBuilder() { + return getPartitionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder + getPartitionsOrBuilder() { + if ((responseCase_ == 1) && (partitionsBuilder_ != null)) { + return partitionsBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + if (!(responseCase_ == 1)) { + response_ = + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.getDefaultInstance(); + } + partitionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder>( + (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 1; + onChanged(); + return partitionsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.StreamList, + com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder, + com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder> + streamsBuilder_; + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + * + * @return Whether the streams field is set. + */ + @java.lang.Override + public boolean hasStreams() { + return responseCase_ == 2; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + * + * @return The streams. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamList getStreams() { + if (streamsBuilder_ == null) { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } else { + if (responseCase_ == 2) { + return streamsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + public Builder setStreams(com.google.cloud.bigquery.storage.v1alpha.StreamList value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + streamsBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + public Builder setStreams( + com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder builderForValue) { + if (streamsBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + streamsBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + public Builder mergeStreams(com.google.cloud.bigquery.storage.v1alpha.StreamList value) { + if (streamsBuilder_ == null) { + if (responseCase_ == 2 + && response_ + != com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1alpha.StreamList.newBuilder( + (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 2) { + streamsBuilder_.mergeFrom(value); + } else { + streamsBuilder_.setMessage(value); + } + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
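+     *
+     * Clearing the set case returns the oneof to RESPONSE_NOT_SET; a short
+     * sketch (the builder variable is illustrative):
+     *
+     *   Builder builder = newBuilder()
+     *       .setStreams(com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance());
+     *   builder.clearStreams();
+     *   // builder.getResponseCase() is ResponseCase.RESPONSE_NOT_SET.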
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + } + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + public com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder getStreamsBuilder() { + return getStreamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder getStreamsOrBuilder() { + if ((responseCase_ == 2) && (streamsBuilder_ != null)) { + return streamsBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.StreamList, + com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder, + com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + if (!(responseCase_ == 2)) { + response_ = com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } + streamsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.StreamList, + com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder, + com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder>( + (com.google.cloud.bigquery.storage.v1alpha.StreamList) response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 2; + onChanged(); + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java 
new file mode 100644 index 000000000000..b7aefe9d0a51 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface ListMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + * + * @return Whether the partitions field is set. + */ + boolean hasPartitions(); + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + * + * @return The partitions. + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList getPartitions(); + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartitionList partitions = 1; + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder + getPartitionsOrBuilder(); + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + * + * @return Whether the streams field is set. + */ + boolean hasStreams(); + + /** + * + * + *
+   * The list of streams.
+   * 
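+   *
+   * Both the generated message and its Builder implement this interface, so
+   * read-only helpers can accept either; a sketch with a hypothetical helper:
+   *
+   *   static boolean hasResult(
+   *       com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponseOrBuilder r) {
+   *     return r.getResponseCase()
+   *         != com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse
+   *             .ResponseCase.RESPONSE_NOT_SET;
+   *   }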
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + * + * @return The streams. + */ + com.google.cloud.bigquery.storage.v1alpha.StreamList getStreams(); + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1alpha.StreamList streams = 2; + */ + com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder getStreamsOrBuilder(); + + com.google.cloud.bigquery.storage.v1alpha.ListMetastorePartitionsResponse.ResponseCase + getResponseCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java new file mode 100644 index 000000000000..c66ca131caf0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java @@ -0,0 +1,2349 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Information about a Hive partition.
+ * 
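+ *
+ * A minimal construction sketch; the key values and the parameter entry are
+ * hypothetical, and putParameters is the usual generated map setter assumed
+ * to exist on the builder:
+ *
+ *   MetastorePartition partition =
+ *       MetastorePartition.newBuilder()
+ *           .addValues("2024")
+ *           .addValues("us-east1")
+ *           .putParameters("source", "backfill")
+ *           .build();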
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.MetastorePartition} + */ +public final class MetastorePartition extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.MetastorePartition) + MetastorePartitionOrBuilder { + private static final long serialVersionUID = 0L; + + // Use MetastorePartition.newBuilder() to construct. + private MetastorePartition(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetastorePartition() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + fields_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MetastorePartition(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.class, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder.class); + } + + private int bitField0_; + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
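+   *
+   * For example (illustrative), a table partitioned by keys (year, region)
+   * would carry values in that same order:
+   *
+   *   // keys:   year    -> region
+   *   // values: "2024"  -> "us-east1"
+   *   MetastorePartition.newBuilder().addValues("2024").addValues("us-east1");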
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
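+   *
+   * Sketch of reading the timestamp from a hypothetical instance;
+   * com.google.protobuf.util.Timestamps is assumed to be available via the
+   * protobuf-java-util artifact:
+   *
+   *   if (partition.hasCreateTime()) {
+   *     long createdMillis =
+   *         com.google.protobuf.util.Timestamps.toMillis(partition.getCreateTime());
+   *   }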
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int STORAGE_DESCRIPTOR_FIELD_NUMBER = 3; + private com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor storageDescriptor_; + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
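+   *
+   * The getter never returns null; when the field is unset it falls back to
+   * the default instance, so presence should be checked explicitly (sketch,
+   * with a hypothetical instance):
+   *
+   *   if (partition.hasStorageDescriptor()) {
+   *     com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor descriptor =
+   *         partition.getStorageDescriptor();
+   *   }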
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + @java.lang.Override + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor getStorageDescriptor() { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + + public static final int PARAMETERS_FIELD_NUMBER = 4; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
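+   *
+   * Map access sketch; "comment" is a made-up key and partition a
+   * hypothetical instance:
+   *
+   *   String comment = partition.getParametersOrDefault("comment", "");
+   *   if (partition.containsParameters("comment")) {
+   *     comment = partition.getParametersOrThrow("comment");
+   *   }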
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int FIELDS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private java.util.List fields_; + + /** + * + * + *
+   * Optional. List of columns.
+   * 
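+   *
+   * Iteration sketch over the column list (partition is a hypothetical
+   * instance):
+   *
+   *   for (com.google.cloud.bigquery.storage.v1alpha.FieldSchema field :
+   *       partition.getFieldsList()) {
+   *     System.out.println(field);
+   *   }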
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getFieldsList() { + return fields_; + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, values_.getRaw(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getStorageDescriptor()); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 4); + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(5, fields_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStorageDescriptor()); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, parameters__); + } + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, fields_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.MetastorePartition)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition other = + (com.google.cloud.bigquery.storage.v1alpha.MetastorePartition) obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasStorageDescriptor() != other.hasStorageDescriptor()) return false; + if (hasStorageDescriptor()) { + if (!getStorageDescriptor().equals(other.getStorageDescriptor())) return false; + } + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + 
public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasStorageDescriptor()) { + hash = (37 * hash) + STORAGE_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getStorageDescriptor().hashCode(); + } + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a Hive partition.
+   * 
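+   *
+   * Generated messages are immutable; modifications go through a builder,
+   * e.g. this hedged sketch starting from a hypothetical instance:
+   *
+   *   MetastorePartition updated =
+   *       partition.toBuilder()
+   *           .addValues("additional-key-value")
+   *           .build();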
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.MetastorePartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.MetastorePartition) + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.class, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCreateTimeFieldBuilder(); + getStorageDescriptorFieldBuilder(); + getFieldsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + internalGetMutableParameters().clear(); + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition build() { + 
com.google.cloud.bigquery.storage.v1alpha.MetastorePartition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition result = + new com.google.cloud.bigquery.storage.v1alpha.MetastorePartition(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.storageDescriptor_ = + storageDescriptorBuilder_ == null + ? storageDescriptor_ + : storageDescriptorBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.MetastorePartition) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.MetastorePartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha.MetastorePartition other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()) + return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + if (other.hasCreateTime()) { + 
mergeCreateTime(other.getCreateTime()); + } + if (other.hasStorageDescriptor()) { + mergeStorageDescriptor(other.getStorageDescriptor()); + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000008; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000010); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + case 18: + { + input.readMessage(getCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + getStorageDescriptorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + com.google.cloud.bigquery.storage.v1alpha.FieldSchema m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.FieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
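+     * For example, a table partitioned on (date, region) might carry the values
+     * ["2024-01-01", "us"] (an illustrative pairing, not part of the proto definition).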
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+     *
+     * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @param values The values to add.
+     * @return This builder for chaining.
+     */
+    public Builder addAllValues(java.lang.Iterable<java.lang.String> values) {
+      ensureValuesIsMutable();
+      com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_);
+      bitField0_ |= 0x00000001;
+      onChanged();
+      return this;
+    }
+
+    /**
+     *
+     *
+     *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor storageDescriptor_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor, + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder, + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder> + storageDescriptorBuilder_; + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor getStorageDescriptor() { + if (storageDescriptorBuilder_ == null) { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } else { + return storageDescriptorBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageDescriptor_ = value; + } else { + storageDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder builderForValue) { + if (storageDescriptorBuilder_ == null) { + storageDescriptor_ = builderForValue.build(); + } else { + storageDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeStorageDescriptor( + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && storageDescriptor_ != null + && storageDescriptor_ + != com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor + .getDefaultInstance()) { + getStorageDescriptorBuilder().mergeFrom(value); + } else { + storageDescriptor_ = value; + } + } else { + storageDescriptorBuilder_.mergeFrom(value); + } + if (storageDescriptor_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearStorageDescriptor() { + bitField0_ = (bitField0_ & ~0x00000004); + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder + getStorageDescriptorBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getStorageDescriptorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + if (storageDescriptorBuilder_ != null) { + return storageDescriptorBuilder_.getMessageOrBuilder(); + } else { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+     *
+     *
+     * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor,
+            com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder,
+            com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder>
+        getStorageDescriptorFieldBuilder() {
+      if (storageDescriptorBuilder_ == null) {
+        storageDescriptorBuilder_ =
+            new com.google.protobuf.SingleFieldBuilderV3<
+                com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor,
+                com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder,
+                com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder>(
+                getStorageDescriptor(), getParentForChildren(), isClean());
+        storageDescriptor_ = null;
+      }
+      return storageDescriptorBuilder_;
+    }
+
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String> parameters_;
+
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+        internalGetParameters() {
+      if (parameters_ == null) {
+        return com.google.protobuf.MapField.emptyMapField(
+            ParametersDefaultEntryHolder.defaultEntry);
+      }
+      return parameters_;
+    }
+
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+        internalGetMutableParameters() {
+      if (parameters_ == null) {
+        parameters_ =
+            com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry);
+      }
+      if (!parameters_.isMutable()) {
+        parameters_ = parameters_.copy();
+      }
+      bitField0_ |= 0x00000008;
+      onChanged();
+      return parameters_;
+    }
+
+    public int getParametersCount() {
+      return internalGetParameters().getMap().size();
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
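+     * For example, a caller might attach {"source": "daily_load"} here (an
+     * illustrative key/value pair, not one defined by the API).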
+     * 
+     *
+     * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    @java.lang.Override
+    public boolean containsParameters(java.lang.String key) {
+      if (key == null) {
+        throw new NullPointerException("map key");
+      }
+      return internalGetParameters().getMap().containsKey(key);
+    }
+
+    /** Use {@link #getParametersMap()} instead. */
+    @java.lang.Override
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String> getParameters() {
+      return getParametersMap();
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+     *
+     * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    @java.lang.Override
+    public java.util.Map<java.lang.String, java.lang.String> getParametersMap() {
+      return internalGetParameters().getMap();
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+     *
+     * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    @java.lang.Override
+    public /* nullable */ java.lang.String getParametersOrDefault(
+        java.lang.String key,
+        /* nullable */
+        java.lang.String defaultValue) {
+      if (key == null) {
+        throw new NullPointerException("map key");
+      }
+      java.util.Map<java.lang.String, java.lang.String> map = internalGetParameters().getMap();
+      return map.containsKey(key) ? map.get(key) : defaultValue;
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+     *
+     * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    @java.lang.Override
+    public java.lang.String getParametersOrThrow(java.lang.String key) {
+      if (key == null) {
+        throw new NullPointerException("map key");
+      }
+      java.util.Map<java.lang.String, java.lang.String> map = internalGetParameters().getMap();
+      if (!map.containsKey(key)) {
+        throw new java.lang.IllegalArgumentException();
+      }
+      return map.get(key);
+    }
+
+    public Builder clearParameters() {
+      bitField0_ = (bitField0_ & ~0x00000008);
+      internalGetMutableParameters().getMutableMap().clear();
+      return this;
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+     *
+     * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder removeParameters(java.lang.String key) {
+      if (key == null) {
+        throw new NullPointerException("map key");
+      }
+      internalGetMutableParameters().getMutableMap().remove(key);
+      return this;
+    }
+
+    /** Use alternate mutation accessors instead. */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String> getMutableParameters() {
+      bitField0_ |= 0x00000008;
+      return internalGetMutableParameters().getMutableMap();
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+     *
+     * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder putAllParameters(java.util.Map<java.lang.String, java.lang.String> values) {
+      internalGetMutableParameters().getMutableMap().putAll(values);
+      bitField0_ |= 0x00000008;
+      return this;
+    }
+
+    private java.util.List<com.google.cloud.bigquery.storage.v1alpha.FieldSchema> fields_ =
+        java.util.Collections.emptyList();
+
+    private void ensureFieldsIsMutable() {
+      if (!((bitField0_ & 0x00000010) != 0)) {
+        fields_ =
+            new java.util.ArrayList<com.google.cloud.bigquery.storage.v1alpha.FieldSchema>(
+                fields_);
+        bitField0_ |= 0x00000010;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.bigquery.storage.v1alpha.FieldSchema,
+            com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder,
+            com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder>
+        fieldsBuilder_;
+
+    /**
+     *
+     *
+     *
+     * Optional. List of columns.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public java.util.List<com.google.cloud.bigquery.storage.v1alpha.FieldSchema> getFieldsList() {
+      if (fieldsBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(fields_);
+      } else {
+        return fieldsBuilder_.getMessageList();
+      }
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1alpha.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1alpha.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1alpha.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public Builder addAllFields(
+        java.lang.Iterable<? extends com.google.cloud.bigquery.storage.v1alpha.FieldSchema>
+            values) {
+      if (fieldsBuilder_ == null) {
+        ensureFieldsIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_);
+        onChanged();
+      } else {
+        fieldsBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public java.util.List<? extends com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder>
+        getFieldsOrBuilderList() {
+      if (fieldsBuilder_ != null) {
+        return fieldsBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(fields_);
+      }
+    }
+
+    /**
+     *
+     *
+     *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1alpha.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1alpha.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL];
+     *
+     */
+    public java.util.List<com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder>
+        getFieldsBuilderList() {
+      return getFieldsFieldBuilder().getBuilderList();
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.bigquery.storage.v1alpha.FieldSchema,
+            com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder,
+            com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder>
+        getFieldsFieldBuilder() {
+      if (fieldsBuilder_ == null) {
+        fieldsBuilder_ =
+            new com.google.protobuf.RepeatedFieldBuilderV3<
+                com.google.cloud.bigquery.storage.v1alpha.FieldSchema,
+                com.google.cloud.bigquery.storage.v1alpha.FieldSchema.Builder,
+                com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder>(
+                fields_, ((bitField0_ & 0x00000010) != 0), getParentForChildren(), isClean());
+        fields_ = null;
+      }
+      return fieldsBuilder_;
+    }
+
+    @java.lang.Override
+    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFields(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.MetastorePartition)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.MetastorePartition)
+  private static final com.google.cloud.bigquery.storage.v1alpha.MetastorePartition
+      DEFAULT_INSTANCE;
+
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.MetastorePartition();
+  }
+
+  public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<MetastorePartition> PARSER =
+      new com.google.protobuf.AbstractParser<MetastorePartition>() {
+        @java.lang.Override
+        public MetastorePartition parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          Builder builder = newBuilder();
+          try {
+            builder.mergeFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            throw e.setUnfinishedMessage(builder.buildPartial());
+          } catch (com.google.protobuf.UninitializedMessageException e) {
+            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+          } catch (java.io.IOException e) {
+            throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                .setUnfinishedMessage(builder.buildPartial());
+          }
+          return builder.buildPartial();
+        }
+      };
+
+  public static com.google.protobuf.Parser<MetastorePartition> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<MetastorePartition> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java
new file mode 100644
index 000000000000..3bad3b0fc519
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java
@@ -0,0 +1,1036 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1alpha/partition.proto
+
+// Protobuf Java Version: 3.25.8
+package com.google.cloud.bigquery.storage.v1alpha;
+
+/**
+ *
+ *
+ *
+ * List of metastore partitions.
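+ * A minimal construction sketch (illustrative only, built from the generated
+ * methods shown in this file and in MetastorePartition.java):
+ *
+ *   MetastorePartitionList.newBuilder()
+ *       .addPartitions(MetastorePartition.newBuilder().addValues("2024-01-01").build())
+ *       .build();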
+ * 
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.MetastorePartitionList}
+ */
+public final class MetastorePartitionList extends com.google.protobuf.GeneratedMessageV3
+    implements
+    // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.MetastorePartitionList)
+    MetastorePartitionListOrBuilder {
+  private static final long serialVersionUID = 0L;
+
+  // Use MetastorePartitionList.newBuilder() to construct.
+  private MetastorePartitionList(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  private MetastorePartitionList() {
+    partitions_ = java.util.Collections.emptyList();
+  }
+
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new MetastorePartitionList();
+  }
+
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto
+        .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto
+        .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.class,
+            com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder.class);
+  }
+
+  public static final int PARTITIONS_FIELD_NUMBER = 1;
+
+  @SuppressWarnings("serial")
+  private java.util.List<com.google.cloud.bigquery.storage.v1alpha.MetastorePartition> partitions_;
+
+  /**
+   *
+   *
+   *
+   * Required. List of partitions.
+   * 
+   *
+   *
+   * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED];
+   *
+   */
+  @java.lang.Override
+  public java.util.List<com.google.cloud.bigquery.storage.v1alpha.MetastorePartition>
+      getPartitionsList() {
+    return partitions_;
+  }
+
+  /**
+   *
+   *
+   *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList other = + (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * List of metastore partitions.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.MetastorePartitionList} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.class, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList build() { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList result = + new com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + 
public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList.getDefaultInstance()) + return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.parser(), + extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + partitions_ = java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList( + partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public java.util.List<com.google.cloud.bigquery.storage.v1alpha.MetastorePartition>
+        getPartitionsList() {
+      if (partitionsBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(partitions_);
+      } else {
+        return partitionsBuilder_.getMessageList();
+      }
+    }
+
+    /**
+     *
+     *
+     *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+     *
+     *
+     * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public Builder addAllPartitions(
+        java.lang.Iterable<? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartition>
+            values) {
+      if (partitionsBuilder_ == null) {
+        ensurePartitionsIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_);
+        onChanged();
+      } else {
+        partitionsBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+
+    /**
+     *
+     *
+     *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + getPartitionsBuilder(int index) { + return getPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addPartitionsBuilder() { + return getPartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addPartitionsBuilder(int index) { + return getPartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionsBuilderList() { + return getPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) + private static final com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MetastorePartitionList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionList + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java new file mode 
100644 index 000000000000..82e4981e432e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java @@ -0,0 +1,93 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface MetastorePartitionListOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.MetastorePartitionList) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getPartitions(int index); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getPartitionsCount(); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder getPartitionsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java new file mode 100644 index 000000000000..de488c98ab88 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java @@ -0,0 +1,314 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface MetastorePartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.MetastorePartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + boolean hasStorageDescriptor(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor getStorageDescriptor(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getFieldsList(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.FieldSchema getFields(int index); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.FieldSchemaOrBuilder getFieldsOrBuilder(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java new file mode 100644 index 000000000000..517fea249c4d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java @@ -0,0 +1,234 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public final class MetastorePartitionProto { + private MetastorePartitionProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "5google/cloud/bigquery/storage/v1alpha/partition.proto\022%google.cloud.bigquery.s" + + "torage.v1alpha\032\037google/api/field_behavio" + + "r.proto\032\031google/api/resource.proto\032\037google/protobuf/timestamp.proto\"3\n" + + "\013FieldSchema\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022\021\n" + + "\004type\030\002 \001(\tB\003\340A\002\"\260\001\n" + + "\021StorageDescriptor\022\031\n" + + "\014location_uri\030\001 \001(\tB\003\340A\001\022\031\n" + + "\014input_format\030\002 \001(\tB\003\340A\001\022\032\n\r" + + "output_format\030\003 \001(\tB\003\340A\001\022I\n\n" + + "serde_info\030\004" + + " \001(\01320.google.cloud.bigquery.storage.v1alpha.SerDeInfoB\003\340A\001\"\320\001\n" + + "\tSerDeInfo\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\001\022\"\n" + + "\025serialization_library\030\002 \001(\tB\003\340A\002\022Y\n\n" + + "parameters\030\003 \003(\0132@.google.cl" + + "oud.bigquery.storage.v1alpha.SerDeInfo.ParametersEntryB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"\232\003\n" + + "\022MetastorePartition\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002\0224\n" + + "\013create_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022Y\n" + + "\022storage_descriptor\030\003 \001(\01328.g" + + "oogle.cloud.bigquery.storage.v1alpha.StorageDescriptorB\003\340A\001\022b\n\n" + + "parameters\030\004 \003(\0132I.google.cloud.bigquery.storage.v1alpha." 
+ + "MetastorePartition.ParametersEntryB\003\340A\001\022G\n" + + "\006fields\030\005" + + " \003(\01322.google.cloud.bigquery.storage.v1alpha.FieldSchemaB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"l\n" + + "\026MetastorePartitionList\022R\n\n" + + "partitions\030\001" + + " \003(\01329.google.cloud.bigquery.storage.v1alpha.MetastorePartitionB\003\340A\002\"\272\001\n\n" + + "ReadStream\022\024\n" + + "\004name\030\001 \001(\tB\006\340A\003\340A\010:\225\001\352A\221\001\n" + + ")bigquerystorage.googleapis.com/ReadStream\022Kprojects/{project}/locations/{location" + + "}/sessions/{session}/streams/{stream}*\013readStreams2\n" + + "readStream\"U\n\n" + + "StreamList\022G\n" + + "\007streams\030\001" + + " \003(\01321.google.cloud.bigquery.storage.v1alpha.ReadStreamB\003\340A\003\"/\n" + + "\030MetastorePartitionValues\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002B\333\001\n" + + ")com.google.cloud.bigquery.storage.v1alphaB\027MetastorePartitionProtoP\001ZCcloud.g" + + "oogle.com/go/bigquery/storage/apiv1alpha/storagepb;storagepb\252\002%Google.Cloud.BigQ" + + "uery.Storage.V1Alpha\312\002%Google\\Cloud\\BigQuery\\Storage\\V1alphab\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_FieldSchema_descriptor, + new java.lang.String[] { + "Name", "Type", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_descriptor, + new java.lang.String[] { + "LocationUri", "InputFormat", "OutputFormat", "SerdeInfo", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor, + new java.lang.String[] { + "Name", "SerializationLibrary", "Parameters", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_ParametersEntry_descriptor = + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor = + 
getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor, + new java.lang.String[] { + "Values", "CreateTime", "StorageDescriptor", "Parameters", "Fields", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_ParametersEntry_descriptor = + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartition_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionList_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_descriptor, + new java.lang.String[] { + "Streams", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_descriptor, + new java.lang.String[] { + "Values", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java new file mode 100644 index 000000000000..02f151c3bc43 
--- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java @@ -0,0 +1,316 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public final class MetastorePartitionServiceProto { + private MetastorePartitionServiceProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "?google/cloud/bigquery/storage/v1alpha/metastore_partition.proto\022%google.cloud." + + "bigquery.storage.v1alpha\032\034google/api/ann" + + "otations.proto\032\027google/api/client.proto\032" + + "\037google/api/field_behavior.proto\032\031google" + + "/api/resource.proto\0325google/cloud/bigque" + + "ry/storage/v1alpha/partition.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\"\265\001\n" + + "\037CreateMetastorePartitionRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022[\n" + + "\023metastore_partition\030\002 \001(\01329.google.cloud.bigquer" + + "y.storage.v1alpha.MetastorePartitionB\003\340A\002\"\373\001\n" + + "%BatchCreateMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022]\n" + + "\010requests\030\002 \003(\0132F.g" + + "oogle.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequestB\003\340A\002\022%\n" + + "\030skip_existing_partitions\030\003 \001(\010B\003\340A\001\022\025\n" + + "\010trace_id\030\004 \001(\tB\003\340A\001\"w\n" + + "&BatchCreateMetastorePartitionsResponse\022M\n\n" + + "partitions\030\001 \003(\01329." 
+ + "google.cloud.bigquery.storage.v1alpha.MetastorePartition\"\325\001\n" + + "%BatchDeleteMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022^\n" + + "\020partition_values\030\002 \003(\0132?.google.cloud.bigqu" + + "ery.storage.v1alpha.MetastorePartitionValuesB\003\340A\002\022\025\n" + + "\010trace_id\030\004 \001(\tB\003\340A\001\"\264\001\n" + + "\037UpdateMetastorePartitionRequest\022[\n" + + "\023metastore_partition\030\001 \001(\01329.google.cloud.bigquer" + + "y.storage.v1alpha.MetastorePartitionB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\001\"\324\001\n" + + "%BatchUpdateMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022]\n" + + "\010requests\030\002 \003(\0132F.google.cloud.bigquery.stora" + + "ge.v1alpha.UpdateMetastorePartitionRequestB\003\340A\002\022\025\n" + + "\010trace_id\030\004 \001(\tB\003\340A\001\"w\n" + + "&BatchUpdateMetastorePartitionsResponse\022M\n\n" + + "partitions\030\001" + + " \003(\01329.google.cloud.bigquery.storage.v1alpha.MetastorePartition\"\203\001\n" + + "\036ListMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022\023\n" + + "\006filter\030\002 \001(\tB\003\340A\001\022\025\n" + + "\010trace_id\030\003 \001(\tB\003\340A\001\"\310\001\n" + + "\037ListMetastorePartitionsResponse\022S\n\n" + + "partitions\030\001 \001(\0132=.google.cloud.big" + + "query.storage.v1alpha.MetastorePartitionListH\000\022D\n" + + "\007streams\030\002" + + " \001(\01321.google.cloud.bigquery.storage.v1alpha.StreamListH\000B\n\n" + + "\010response\"\336\001\n" + + " StreamMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022\\\n" + + "\024metastore_partitions\030\002" + + " \003(\01329.google.cloud.bigquery.storage.v1alpha.MetastorePartitionB\003\340A\001\022%\n" + + "\030skip_existing_partitions\030\003 \001(\010B\003\340A\001\"u\n" + + "!StreamMetastorePartitionsResponse\022\'\n" + + "\037total_partitions_streamed_count\030\002 \001(\003\022\'\n" + + "\037total_partitions_inserted_count\030\003 \001(\003\"L\n" + + "\026BatchSizeTooLargeError\022\026\n" + + "\016max_batch_size\030\001 \001(\003\022\032\n\r" + + "error_message\030\002 \001(\tB\003\340A\0012\336\n\n" + + "\031MetastorePartitionService\022\221\002\n" + + "\036BatchCreateMetastorePartitions\022L.google.cloud.bigquery." 
+ + "storage.v1alpha.BatchCreateMetastorePartitionsRequest\032M.google.cloud.bigquery.st" + + "orage.v1alpha.BatchCreateMetastorePartit" + + "ionsResponse\"R\202\323\344\223\002L\"G/v1alpha/{parent=p" + + "rojects/*/datasets/*/tables/*}/partitions:batchCreate:\001*\022\332\001\n" + + "\036BatchDeleteMetastorePartitions\022L.google.cloud.bigquery.stor" + + "age.v1alpha.BatchDeleteMetastorePartitio" + + "nsRequest\032\026.google.protobuf.Empty\"R\202\323\344\223\002" + + "L\"G/v1alpha/{parent=projects/*/datasets/" + + "*/tables/*}/partitions:batchDelete:\001*\022\221\002\n" + + "\036BatchUpdateMetastorePartitions\022L.google.cloud.bigquery.storage.v1alpha.BatchUp" + + "dateMetastorePartitionsRequest\032M.google.cloud.bigquery.storage.v1alpha.BatchUpda" + + "teMetastorePartitionsResponse\"R\202\323\344\223\002L\"G/" + + "v1alpha/{parent=projects/*/datasets/*/tables/*}/partitions:batchUpdate:\001*\022\207\002\n" + + "\027ListMetastorePartitions\022E.google.cloud.big" + + "query.storage.v1alpha.ListMetastorePartitionsRequest\032F.google.cloud.bigquery.sto" + + "rage.v1alpha.ListMetastorePartitionsResp" + + "onse\"]\332A\006parent\202\323\344\223\002N\022L/v1alpha/{parent=" + + "projects/*/locations/*/datasets/*/tables/*}/partitions:list\022\264\001\n" + + "\031StreamMetastorePartitions\022G.google.cloud.bigquery.storag" + + "e.v1alpha.StreamMetastorePartitionsRequest\032H.google.cloud.bigquery.storage.v1alp" + + "ha.StreamMetastorePartitionsResponse\"\000(\001" + + "0\001\032{\312A\036bigquerystorage.googleapis.com\322AW" + + "https://www.googleapis.com/auth/bigquery" + + ",https://www.googleapis.com/auth/cloud-platformB\272\002\n" + + ")com.google.cloud.bigquery.storage.v1alphaB\036MetastorePartitionService" + + "ProtoP\001ZCcloud.google.com/go/bigquery/st" + + "orage/apiv1alpha/storagepb;storagepb\252\002%G" + + "oogle.Cloud.BigQuery.Storage.V1Alpha\312\002%Google\\Cloud\\BigQuery\\Storage\\V1alpha\352AU\n" + + "\035bigquery.googleapis.com/Table\0224projects/{project}/datasets/{dataset}/tables/{ta" + + "ble}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_CreateMetastorePartitionRequest_descriptor, + new java.lang.String[] { + "Parent", "MetastorePartition", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsRequest_descriptor, + new 
java.lang.String[] { + "Parent", "Requests", "SkipExistingPartitions", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_BatchCreateMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_BatchDeleteMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "PartitionValues", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_descriptor, + new java.lang.String[] { + "MetastorePartition", "UpdateMask", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Requests", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_BatchUpdateMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_ListMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", "Streams", "Response", + }); + 
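// Editor's note (not generated output): the lookups below bind the streaming
+    // RPC messages and the BatchSizeTooLargeError detail to their generated field
+    // accessor tables; the registry then re-attaches the google.api extensions
+    // (field_behavior, resource, http, client options) via
+    // internalUpdateFileDescriptor so those options remain readable after the
+    // descriptor is parsed. +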
internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "MetastorePartitions", "SkipExistingPartitions", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "TotalPartitionsStreamedCount", "TotalPartitionsInsertedCount", + }); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha_BatchSizeTooLargeError_descriptor, + new java.lang.String[] { + "MaxBatchSize", "ErrorMessage", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java new file mode 100644 index 000000000000..28df0b421d2b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java @@ -0,0 +1,760 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Represents the values of a metastore partition.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues} + */ +public final class MetastorePartitionValues extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) + MetastorePartitionValuesOrBuilder { + private static final long serialVersionUID = 0L; + + // Use MetastorePartitionValues.newBuilder() to construct. + private MetastorePartitionValues(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetastorePartitionValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MetastorePartitionValues(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.class, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder.class); + } + + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, values_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues other = + (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + 
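+  // Editor's sketch (not generated output): a minimal round trip through this
+  // message using the standard generated protobuf API. Values are positional and
+  // must follow the table's partition key order; "2024" and "06" are hypothetical
+  // key values.
+  //
+  //   MetastorePartitionValues values =
+  //       MetastorePartitionValues.newBuilder().addValues("2024").addValues("06").build();
+  //   byte[] wire = values.toByteArray();
+  //   MetastorePartitionValues parsed = MetastorePartitionValues.parseFrom(wire);
+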
+ public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Represents the values of a metastore partition.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValuesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.class, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_MetastorePartitionValues_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues build() { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues result = + new com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + 
} + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + .getDefaultInstance()) return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) + private static final com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MetastorePartitionValues parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java new file mode 100644 index 000000000000..5c3d4ae622d8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface MetastorePartitionValuesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.MetastorePartitionValues) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java new file mode 100644 index 000000000000..ee65e7f9757e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java @@ -0,0 +1,655 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Information about a single stream that is used to read partitions.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.ReadStream} + */ +public final class ReadStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.ReadStream) + ReadStreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadStream.newBuilder() to construct. + private ReadStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadStream() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadStream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.ReadStream.class, + com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.ReadStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.ReadStream other = + (com.google.cloud.bigquery.storage.v1alpha.ReadStream) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1alpha.ReadStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a single stream that is used to read partitions.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.ReadStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.ReadStream) + com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.ReadStream.class, + com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.ReadStream.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_ReadStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ReadStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.ReadStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ReadStream build() { + com.google.cloud.bigquery.storage.v1alpha.ReadStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ReadStream buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.ReadStream result = + new com.google.cloud.bigquery.storage.v1alpha.ReadStream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1alpha.ReadStream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.ReadStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.ReadStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha.ReadStream other) { + if (other == com.google.cloud.bigquery.storage.v1alpha.ReadStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.ReadStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.ReadStream) + private static final com.google.cloud.bigquery.storage.v1alpha.ReadStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.ReadStream(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.ReadStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ReadStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java new file mode 100644 index 000000000000..64fa2ef42fdf --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface ReadStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.ReadStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java new file mode 100644 index 000000000000..7546d878519d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java @@ -0,0 +1,1236 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Serializer and deserializer information.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.SerDeInfo} + */ +public final class SerDeInfo extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.SerDeInfo) + SerDeInfoOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SerDeInfo.newBuilder() to construct. + private SerDeInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SerDeInfo() { + name_ = ""; + serializationLibrary_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SerDeInfo(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.class, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERIALIZATION_LIBRARY_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object serializationLibrary_ = ""; + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLibrary. + */ + @java.lang.Override + public java.lang.String getSerializationLibrary() { + java.lang.Object ref = serializationLibrary_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializationLibrary_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLibrary. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializationLibraryBytes() { + java.lang.Object ref = serializationLibrary_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializationLibrary_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMETERS_FIELD_NUMBER = 3; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(serializationLibrary_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, serializationLibrary_); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 3); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(serializationLibrary_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, serializationLibrary_); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, parameters__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.SerDeInfo)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo other = + (com.google.cloud.bigquery.storage.v1alpha.SerDeInfo) obj; + + if (!getName().equals(other.getName())) return false; + if (!getSerializationLibrary().equals(other.getSerializationLibrary())) return false; + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SERIALIZATION_LIBRARY_FIELD_NUMBER; + hash = (53 * hash) + getSerializationLibrary().hashCode(); + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode 
= hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1alpha.SerDeInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
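+  // A minimal construction sketch (not part of the generated code): the SerDe
+  // name, library class, and parameters below are hypothetical Hive-style
+  // examples, and setName, setSerializationLibrary, and putParameters are
+  // assumed to be the standard builder methods protoc emits for string and
+  // map<string, string> fields.
+  //
+  //   SerDeInfo serDeInfo =
+  //       SerDeInfo.newBuilder()
+  //           .setName("lazy-simple") // optional, at most 256 characters
+  //           .setSerializationLibrary(
+  //               "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe") // required
+  //           .putParameters("field.delim", ",") // optional, at most 10 KiB total
+  //           .build();
+  //   String library = serDeInfo.getSerializationLibrary();
+  //   String delim = serDeInfo.getParametersOrDefault("field.delim", "");
+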
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Serializer and deserializer information.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.SerDeInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.SerDeInfo) + com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.class, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + serializationLibrary_ = ""; + internalGetMutableParameters().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_SerDeInfo_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo build() { + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo result = + new com.google.cloud.bigquery.storage.v1alpha.SerDeInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1alpha.SerDeInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.serializationLibrary_ = serializationLibrary_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.parameters_ = internalGetParameters(); + 
result.parameters_.makeImmutable(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.SerDeInfo) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.SerDeInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha.SerDeInfo other) { + if (other == com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getSerializationLibrary().isEmpty()) { + serializationLibrary_ = other.serializationLibrary_; + bitField0_ |= 0x00000002; + onChanged(); + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000004; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + serializationLibrary_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
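+     *
+     * Sketch (hypothetical value): builder.setName("custom_serde");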
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object serializationLibrary_ = ""; + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLibrary. + */ + public java.lang.String getSerializationLibrary() { + java.lang.Object ref = serializationLibrary_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializationLibrary_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLibrary. + */ + public com.google.protobuf.ByteString getSerializationLibraryBytes() { + java.lang.Object ref = serializationLibrary_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializationLibrary_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
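+     *
+     * Sketch, assuming a Hive CSV SerDe is the intended library:
+     *   builder.setSerializationLibrary("org.apache.hadoop.hive.serde2.OpenCSVSerde");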
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The serializationLibrary to set. + * @return This builder for chaining. + */ + public Builder setSerializationLibrary(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + serializationLibrary_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSerializationLibrary() { + serializationLibrary_ = getDefaultInstance().getSerializationLibrary(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for serializationLibrary to set. + * @return This builder for chaining. + */ + public Builder setSerializationLibraryBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + serializationLibrary_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000004; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
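+     *
+     * A short map-accessor sketch (keys and values are hypothetical):
+     *   builder.putParameters("separatorChar", ",");
+     *   boolean present = builder.containsParameters("separatorChar");
+     *   String sep = builder.getParametersOrDefault("separatorChar", "\t");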
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000004); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000004; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000004; + return this; + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000004; + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.SerDeInfo) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.SerDeInfo) + private static final com.google.cloud.bigquery.storage.v1alpha.SerDeInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.SerDeInfo(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.SerDeInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SerDeInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java new file mode 100644 index 000000000000..3c8573ecd2b0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface SerDeInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.SerDeInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLibrary. + */ + java.lang.String getSerializationLibrary(); + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLibrary. + */ + com.google.protobuf.ByteString getSerializationLibraryBytes(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
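+   *
+   * Read-side sketch on any SerDeInfoOrBuilder (the key is hypothetical):
+   *   String sep = message.getParametersOrDefault("separatorChar", ",");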
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java new file mode 100644 index 000000000000..800587f1bf7a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java @@ -0,0 +1,1374 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Contains information about the physical storage of the data in the metastore
+ * partition.
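+ *
+ * A minimal construction sketch (illustrative; the URI and class names are
+ * hypothetical):
+ *
+ *   StorageDescriptor descriptor =
+ *       StorageDescriptor.newBuilder()
+ *           .setLocationUri("gs://example-bucket/warehouse/table/dt=2020-01-01")
+ *           .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+ *           .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+ *           .setSerdeInfo(
+ *               SerDeInfo.newBuilder()
+ *                   .setSerializationLibrary("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
+ *           .build();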
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StorageDescriptor} + */ +public final class StorageDescriptor extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.StorageDescriptor) + StorageDescriptorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StorageDescriptor.newBuilder() to construct. + private StorageDescriptor(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StorageDescriptor() { + locationUri_ = ""; + inputFormat_ = ""; + outputFormat_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StorageDescriptor(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.class, + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder.class); + } + + private int bitField0_; + public static final int LOCATION_URI_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationUri_ = ""; + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + @java.lang.Override + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INPUT_FORMAT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object inputFormat_ = ""; + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + @java.lang.Override + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + inputFormat_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OUTPUT_FORMAT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object outputFormat_ = ""; + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + @java.lang.Override + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERDE_INFO_FIELD_NUMBER = 4; + private com.google.cloud.bigquery.storage.v1alpha.SerDeInfo serdeInfo_; + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + @java.lang.Override + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo getSerdeInfo() { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(locationUri_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, locationUri_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(inputFormat_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, inputFormat_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(outputFormat_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, outputFormat_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getSerdeInfo()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(locationUri_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, locationUri_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(inputFormat_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, inputFormat_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(outputFormat_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, outputFormat_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getSerdeInfo()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor other = + (com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor) obj; + + if (!getLocationUri().equals(other.getLocationUri())) return false; + if (!getInputFormat().equals(other.getInputFormat())) return false; + if (!getOutputFormat().equals(other.getOutputFormat())) return false; + if (hasSerdeInfo() != other.hasSerdeInfo()) return false; + if (hasSerdeInfo()) { + if (!getSerdeInfo().equals(other.getSerdeInfo())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOCATION_URI_FIELD_NUMBER; + hash = (53 * hash) + getLocationUri().hashCode(); + hash = (37 * hash) + INPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + getInputFormat().hashCode(); + hash = (37 * hash) + OUTPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + 
getOutputFormat().hashCode(); + if (hasSerdeInfo()) { + hash = (37 * hash) + SERDE_INFO_FIELD_NUMBER; + hash = (53 * hash) + getSerdeInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + 
public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains information about the physical storage of the data in the metastore
+   * partition.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StorageDescriptor} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.StorageDescriptor) + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.class, + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSerdeInfoFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + locationUri_ = ""; + inputFormat_ = ""; + outputFormat_ = ""; + serdeInfo_ = null; + if (serdeInfoBuilder_ != null) { + serdeInfoBuilder_.dispose(); + serdeInfoBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StorageDescriptor_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor build() { + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor result = + new com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.locationUri_ = locationUri_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.inputFormat_ = inputFormat_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.outputFormat_ = outputFormat_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.serdeInfo_ = serdeInfoBuilder_ == null ? 
serdeInfo_ : serdeInfoBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor other) { + if (other == com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor.getDefaultInstance()) + return this; + if (!other.getLocationUri().isEmpty()) { + locationUri_ = other.locationUri_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInputFormat().isEmpty()) { + inputFormat_ = other.inputFormat_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getOutputFormat().isEmpty()) { + outputFormat_ = other.outputFormat_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasSerdeInfo()) { + mergeSerdeInfo(other.getSerdeInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + locationUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + inputFormat_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + outputFormat_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(getSerdeInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object locationUri_ = 
""; + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLocationUri() { + locationUri_ = getDefaultInstance().getLocationUri(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object inputFormat_ = ""; + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + inputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + public com.google.protobuf.ByteString getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The inputFormat to set. + * @return This builder for chaining. + */ + public Builder setInputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + inputFormat_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearInputFormat() { + inputFormat_ = getDefaultInstance().getInputFormat(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for inputFormat to set. + * @return This builder for chaining. + */ + public Builder setInputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + inputFormat_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object outputFormat_ = ""; + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + outputFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOutputFormat() { + outputFormat_ = getDefaultInstance().getOutputFormat(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + outputFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1alpha.SerDeInfo serdeInfo_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder> + serdeInfoBuilder_; + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo getSerdeInfo() { + if (serdeInfoBuilder_ == null) { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } else { + return serdeInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
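+     *
+     * <p>A minimal sketch, assuming {@code SerDeInfo} exposes a generated builder like the
+     * other messages in this package (the serialization library name below is hypothetical):
+     * <pre>{@code
+     * StorageDescriptor descriptor =
+     *     StorageDescriptor.newBuilder()
+     *         .setSerdeInfo(
+     *             SerDeInfo.newBuilder()
+     *                 .setSerializationLibrary("org.apache.hadoop.hive.serde2.OpenCSVSerde")
+     *                 .build())
+     *         .build();
+     * }</pre>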
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSerdeInfo(com.google.cloud.bigquery.storage.v1alpha.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serdeInfo_ = value; + } else { + serdeInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSerdeInfo( + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder builderForValue) { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = builderForValue.build(); + } else { + serdeInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSerdeInfo(com.google.cloud.bigquery.storage.v1alpha.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && serdeInfo_ != null + && serdeInfo_ + != com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance()) { + getSerdeInfoBuilder().mergeFrom(value); + } else { + serdeInfo_ = value; + } + } else { + serdeInfoBuilder_.mergeFrom(value); + } + if (serdeInfo_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSerdeInfo() { + bitField0_ = (bitField0_ & ~0x00000008); + serdeInfo_ = null; + if (serdeInfoBuilder_ != null) { + serdeInfoBuilder_.dispose(); + serdeInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder getSerdeInfoBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getSerdeInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { + if (serdeInfoBuilder_ != null) { + return serdeInfoBuilder_.getMessageOrBuilder(); + } else { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder> + getSerdeInfoFieldBuilder() { + if (serdeInfoBuilder_ == null) { + serdeInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo.Builder, + com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder>( + getSerdeInfo(), getParentForChildren(), isClean()); + serdeInfo_ = null; + } + return serdeInfoBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.StorageDescriptor) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.StorageDescriptor) + private static final com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StorageDescriptor getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java new file mode 100644 index 000000000000..491d0beea768 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java @@ -0,0 +1,159 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface StorageDescriptorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.StorageDescriptor) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
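+   *
+   * <p>Read-side sketch: any {@code StorageDescriptor}, or its builder, can be consumed
+   * through this interface, e.g.
+   * <pre>{@code
+   * void log(StorageDescriptorOrBuilder descriptor) {
+   *   System.out.println(descriptor.getLocationUri());
+   * }
+   * }</pre>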
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + java.lang.String getLocationUri(); + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + com.google.protobuf.ByteString getLocationUriBytes(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + java.lang.String getInputFormat(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + com.google.protobuf.ByteString getInputFormatBytes(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + java.lang.String getOutputFormat(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + com.google.protobuf.ByteString getOutputFormatBytes(); + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + boolean hasSerdeInfo(); + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + com.google.cloud.bigquery.storage.v1alpha.SerDeInfo getSerdeInfo(); + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.SerDeInfoOrBuilder getSerdeInfoOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java new file mode 100644 index 000000000000..385ae739889f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java @@ -0,0 +1,1016 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * List of streams.
+ * 
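+ *
+ * <p>Read-side sketch (assuming {@code ReadStream} exposes {@code getName()} as in the
+ * other BigQuery Storage APIs):
+ * <pre>{@code
+ * for (ReadStream stream : streamList.getStreamsList()) {
+ *   System.out.println(stream.getName());
+ * }
+ * }</pre>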
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StreamList} + */ +public final class StreamList extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.StreamList) + StreamListOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamList.newBuilder() to construct. + private StreamList(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamList() { + streams_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StreamList.class, + com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder.class); + } + + public static final int STREAMS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List streams_; + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getStreamsList() { + return streams_; + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getStreamsOrBuilderList() { + return streams_; + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getStreamsCount() { + return streams_.size(); + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ReadStream getStreams(int index) { + return streams_.get(index); + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + return streams_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(1, streams_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, streams_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha.StreamList)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.StreamList other = + (com.google.cloud.bigquery.storage.v1alpha.StreamList) obj; + + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1alpha.StreamList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * List of streams.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StreamList} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.StreamList) + com.google.cloud.bigquery.storage.v1alpha.StreamListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StreamList.class, + com.google.cloud.bigquery.storage.v1alpha.StreamList.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha.StreamList.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + } else { + streams_ = null; + streamsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamList_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamList getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamList build() { + com.google.cloud.bigquery.storage.v1alpha.StreamList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamList buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.StreamList result = + new com.google.cloud.bigquery.storage.v1alpha.StreamList(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.StreamList result) { + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.streams_ = streams_; + } else { + result.streams_ = streamsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1alpha.StreamList result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor 
field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha.StreamList) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha.StreamList) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha.StreamList other) { + if (other == com.google.cloud.bigquery.storage.v1alpha.StreamList.getDefaultInstance()) + return this; + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1alpha.ReadStream m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.ReadStream.parser(), + extensionRegistry); + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(m); + } else { + streamsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + streams_ = + new java.util.ArrayList(streams_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.ReadStream, + com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder> + 
streamsBuilder_; + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.ReadStream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1alpha.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1alpha.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1alpha.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllStreams( + java.lang.Iterable values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder getStreamsBuilder( + int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1alpha.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder addStreamsBuilder( + int index) { + return getStreamsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1alpha.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.ReadStream, + com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.ReadStream, + com.google.cloud.bigquery.storage.v1alpha.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder>( + streams_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.StreamList) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.StreamList) + private static final com.google.cloud.bigquery.storage.v1alpha.StreamList DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha.StreamList(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java new file mode 100644 index 000000000000..00b28ac19ddd --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java @@ -0,0 +1,92 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface StreamListOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.StreamList) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getStreamsList(); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.ReadStream getStreams(int index); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getStreamsCount(); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getStreamsOrBuilderList(); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.ReadStreamOrBuilder getStreamsOrBuilder(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java new file mode 100644 index 000000000000..d5005e0dd798 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java @@ -0,0 +1,1406 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * The top-level message sent by the client to the
+ * [Partitions.StreamMetastorePartitions][] method.
+ * Follows the default gRPC streaming maximum size of 4 MB.
+ * 
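+ *
+ * <p>Illustrative construction (the parent path and the empty partition are hypothetical;
+ * a real batch of partitions must stay under the 4 MB limit noted above):
+ * <pre>{@code
+ * StreamMetastorePartitionsRequest request =
+ *     StreamMetastorePartitionsRequest.newBuilder()
+ *         .setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table")
+ *         .addMetastorePartitions(MetastorePartition.getDefaultInstance())
+ *         .build();
+ * }</pre>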
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest} + */ +public final class StreamMetastorePartitionsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) + StreamMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamMetastorePartitionsRequest.newBuilder() to construct. + private StreamMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamMetastorePartitionsRequest() { + parent_ = ""; + metastorePartitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table where the partition is to be added, in
+   * the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table where the partition is to be added, in
+   * the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METASTORE_PARTITIONS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + metastorePartitions_; + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getMetastorePartitionsList() { + return metastorePartitions_; + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getMetastorePartitionsOrBuilderList() { + return metastorePartitions_; + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getMetastorePartitionsCount() { + return metastorePartitions_.size(); + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartitions( + int index) { + return metastorePartitions_.get(index); + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionsOrBuilder(int index) { + return metastorePartitions_.get(index); + } + + public static final int SKIP_EXISTING_PARTITIONS_FIELD_NUMBER = 3; + private boolean skipExistingPartitions_ = false; + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+   * set to true:
+   *  1) the server will skip existing partitions and
+   *  insert only the non-existing partitions as part of the commit.
+   *  2) The client must set the `skip_existing_partitions` field to true for
+   *  all requests in the stream.
+   * 
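+   *
+   * <p>Sketch of point 2) above: when opting in, set the flag on every request before
+   * sending it on the stream:
+   * <pre>{@code
+   * StreamMetastorePartitionsRequest request =
+   *     StreamMetastorePartitionsRequest.newBuilder()
+   *         .setSkipExistingPartitions(true)
+   *         .build();
+   * }</pre>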
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < metastorePartitions_.size(); i++) { + output.writeMessage(2, metastorePartitions_.get(i)); + } + if (skipExistingPartitions_ != false) { + output.writeBool(3, skipExistingPartitions_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < metastorePartitions_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(2, metastorePartitions_.get(i)); + } + if (skipExistingPartitions_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, skipExistingPartitions_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getMetastorePartitionsList().equals(other.getMetastorePartitionsList())) return false; + if (getSkipExistingPartitions() != other.getSkipExistingPartitions()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getMetastorePartitionsCount() > 0) { + hash = (37 * hash) + METASTORE_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getMetastorePartitionsList().hashCode(); + } + hash = (37 * hash) + SKIP_EXISTING_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSkipExistingPartitions()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The top-level message sent by the client to the
+   * [Partitions.StreamMetastorePartitions][] method.
+   * It follows the default gRPC streaming maximum message size of 4 MB.
+   * 
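+   *
+   * A minimal construction sketch (illustrative only; the resource names and
+   * values below are placeholders, not taken from this change):
+   * <pre>{@code
+   * StreamMetastorePartitionsRequest request =
+   *     StreamMetastorePartitionsRequest.newBuilder()
+   *         .setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table")
+   *         .addMetastorePartitions(MetastorePartition.getDefaultInstance())
+   *         .setSkipExistingPartitions(true)
+   *         .build();
+   * }</pre>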
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (metastorePartitionsBuilder_ == null) { + metastorePartitions_ = java.util.Collections.emptyList(); + } else { + metastorePartitions_ = null; + metastorePartitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipExistingPartitions_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest result) { + if (metastorePartitionsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + metastorePartitions_ = java.util.Collections.unmodifiableList(metastorePartitions_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.metastorePartitions_ = 
metastorePartitions_; + } else { + result.metastorePartitions_ = metastorePartitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.skipExistingPartitions_ = skipExistingPartitions_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (metastorePartitionsBuilder_ == null) { + if (!other.metastorePartitions_.isEmpty()) { + if (metastorePartitions_.isEmpty()) { + metastorePartitions_ = other.metastorePartitions_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.addAll(other.metastorePartitions_); + } + onChanged(); + } + } else { + if (!other.metastorePartitions_.isEmpty()) { + if (metastorePartitionsBuilder_.isEmpty()) { + metastorePartitionsBuilder_.dispose(); + metastorePartitionsBuilder_ = null; + metastorePartitions_ = other.metastorePartitions_; + bitField0_ = (bitField0_ & ~0x00000002); + metastorePartitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getMetastorePartitionsFieldBuilder() + : null; + } else { + metastorePartitionsBuilder_.addAllMessages(other.metastorePartitions_); + } + } + } + if (other.getSkipExistingPartitions() != false) { + setSkipExistingPartitions(other.getSkipExistingPartitions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.parser(), + extensionRegistry); + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(m); + } else { + metastorePartitionsBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + skipExistingPartitions_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
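+     *
+     * Illustrative value, following the documented format (placeholder names):
+     * <pre>{@code
+     * builder.setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table");
+     * }</pre>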
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the partitions are to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List + metastorePartitions_ = java.util.Collections.emptyList(); + + private void ensureMetastorePartitionsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + metastorePartitions_ = + new java.util.ArrayList( + metastorePartitions_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + metastorePartitionsBuilder_; + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getMetastorePartitionsList() { + if (metastorePartitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(metastorePartitions_); + } else { + return metastorePartitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getMetastorePartitionsCount() { + if (metastorePartitionsBuilder_ == null) { + return metastorePartitions_.size(); + } else { + return metastorePartitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartitions( + int index) { + if (metastorePartitionsBuilder_ == null) { + return metastorePartitions_.get(index); + } else { + return metastorePartitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMetastorePartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.set(index, value); + onChanged(); + } else { + metastorePartitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMetastorePartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.set(index, builderForValue.build()); + onChanged(); + } else { + metastorePartitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(value); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + int index, com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(index, value); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(builderForValue.build()); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + int index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(index, builderForValue.build()); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllMetastorePartitions( + java.lang.Iterable + values) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, metastorePartitions_); + onChanged(); + } else { + metastorePartitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearMetastorePartitions() { + if (metastorePartitionsBuilder_ == null) { + metastorePartitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + metastorePartitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeMetastorePartitions(int index) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.remove(index); + onChanged(); + } else { + metastorePartitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + getMetastorePartitionsBuilder(int index) { + return getMetastorePartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionsOrBuilder(int index) { + if (metastorePartitionsBuilder_ == null) { + return metastorePartitions_.get(index); + } else { + return metastorePartitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getMetastorePartitionsOrBuilderList() { + if (metastorePartitionsBuilder_ != null) { + return metastorePartitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(metastorePartitions_); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addMetastorePartitionsBuilder() { + return getMetastorePartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + addMetastorePartitionsBuilder(int index) { + return getMetastorePartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getMetastorePartitionsBuilderList() { + return getMetastorePartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getMetastorePartitionsFieldBuilder() { + if (metastorePartitionsBuilder_ == null) { + metastorePartitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder>( + metastorePartitions_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + metastorePartitions_ = null; + } + return metastorePartitionsBuilder_; + } + + private boolean skipExistingPartitions_; + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+     * set to true:
+     *  1) The server will skip existing partitions and insert only the
+     *  non-existing partitions as part of the commit.
+     *  2) The client must set the `skip_existing_partitions` field to true for
+     *  all requests in the stream.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+     * set to true:
+     *  1) The server will skip existing partitions and insert only the
+     *  non-existing partitions as part of the commit.
+     *  2) The client must set the `skip_existing_partitions` field to true for
+     *  all requests in the stream.
+     * 
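+     *
+     * Sketch (illustrative; per the field semantics above, the same value must
+     * be set on every request in the stream):
+     * <pre>{@code
+     * builder.setSkipExistingPartitions(true); // repeat on all requests in this stream
+     * }</pre>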
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The skipExistingPartitions to set. + * @return This builder for chaining. + */ + public Builder setSkipExistingPartitions(boolean value) { + + skipExistingPartitions_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+     * set to true:
+     *  1) The server will skip existing partitions and insert only the
+     *  non-existing partitions as part of the commit.
+     *  2) The client must set the `skip_existing_partitions` field to true for
+     *  all requests in the stream.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSkipExistingPartitions() { + bitField0_ = (bitField0_ & ~0x00000004); + skipExistingPartitions_ = false; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..f1d3f64e932d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface StreamMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table where the partitions are to be added, in the
+   * format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table where the partitions are to be added, in the
+   * format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getMetastorePartitionsList(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartitions(int index); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getMetastorePartitionsCount(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getMetastorePartitionsOrBuilderList(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionsOrBuilder(int index); + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+   * set to true:
+   *  1) The server will skip existing partitions and insert only the
+   *  non-existing partitions as part of the commit.
+   *  2) The client must set the `skip_existing_partitions` field to true for
+   *  all requests in the stream.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + boolean getSkipExistingPartitions(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java new file mode 100644 index 000000000000..3e4101f4fba7 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java @@ -0,0 +1,687 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * This is the response message sent by the server
+ * to the client for the [Partitions.StreamMetastorePartitions][] method when
+ * the commit is successful. The server will close the stream after sending
+ * this message.
+ * 
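+ *
+ * A reading sketch (illustrative; the {@code response} variable and the
+ * surrounding stream-handling code are assumed, not shown here):
+ * <pre>{@code
+ * // The counts are only populated in the final response before the server
+ * // closes the stream.
+ * long streamed = response.getTotalPartitionsStreamedCount();
+ * long inserted = response.getTotalPartitionsInsertedCount();
+ * }</pre>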
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse} + */ +public final class StreamMetastorePartitionsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) + StreamMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamMetastorePartitionsResponse.newBuilder() to construct. + private StreamMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamMetastorePartitionsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse.Builder + .class); + } + + public static final int TOTAL_PARTITIONS_STREAMED_COUNT_FIELD_NUMBER = 2; + private long totalPartitionsStreamedCount_ = 0L; + + /** + * + * + *
+   * Total count of partitions streamed by the client during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return The totalPartitionsStreamedCount. + */ + @java.lang.Override + public long getTotalPartitionsStreamedCount() { + return totalPartitionsStreamedCount_; + } + + public static final int TOTAL_PARTITIONS_INSERTED_COUNT_FIELD_NUMBER = 3; + private long totalPartitionsInsertedCount_ = 0L; + + /** + * + * + *
+   * Total count of partitions inserted by the server during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return The totalPartitionsInsertedCount. + */ + @java.lang.Override + public long getTotalPartitionsInsertedCount() { + return totalPartitionsInsertedCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (totalPartitionsStreamedCount_ != 0L) { + output.writeInt64(2, totalPartitionsStreamedCount_); + } + if (totalPartitionsInsertedCount_ != 0L) { + output.writeInt64(3, totalPartitionsInsertedCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (totalPartitionsStreamedCount_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(2, totalPartitionsStreamedCount_); + } + if (totalPartitionsInsertedCount_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(3, totalPartitionsInsertedCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) obj; + + if (getTotalPartitionsStreamedCount() != other.getTotalPartitionsStreamedCount()) return false; + if (getTotalPartitionsInsertedCount() != other.getTotalPartitionsInsertedCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TOTAL_PARTITIONS_STREAMED_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTotalPartitionsStreamedCount()); + hash = (37 * hash) + TOTAL_PARTITIONS_INSERTED_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTotalPartitionsInsertedCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * This is the response message sent by the server
+   * to the client for the [Partitions.StreamMetastorePartitions][] method when
+   * the commit is successful. The server will close the stream after sending
+   * this message.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + totalPartitionsStreamedCount_ = 0L; + totalPartitionsInsertedCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_StreamMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse build() { + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.totalPartitionsStreamedCount_ = totalPartitionsStreamedCount_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.totalPartitionsInsertedCount_ = totalPartitionsInsertedCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + 
return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + .getDefaultInstance()) return this; + if (other.getTotalPartitionsStreamedCount() != 0L) { + setTotalPartitionsStreamedCount(other.getTotalPartitionsStreamedCount()); + } + if (other.getTotalPartitionsInsertedCount() != 0L) { + setTotalPartitionsInsertedCount(other.getTotalPartitionsInsertedCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 16: + { + totalPartitionsStreamedCount_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 16 + case 24: + { + totalPartitionsInsertedCount_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long totalPartitionsStreamedCount_; + + /** + * + * + *
+     * Total count of partitions streamed by the client during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return The totalPartitionsStreamedCount. + */ + @java.lang.Override + public long getTotalPartitionsStreamedCount() { + return totalPartitionsStreamedCount_; + } + + /** + * + * + *
+     * Total count of partitions streamed by the client during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @param value The totalPartitionsStreamedCount to set. + * @return This builder for chaining. + */ + public Builder setTotalPartitionsStreamedCount(long value) { + + totalPartitionsStreamedCount_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Total count of partitions streamed by the client during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return This builder for chaining. + */ + public Builder clearTotalPartitionsStreamedCount() { + bitField0_ = (bitField0_ & ~0x00000001); + totalPartitionsStreamedCount_ = 0L; + onChanged(); + return this; + } + + private long totalPartitionsInsertedCount_; + + /** + * + * + *
+     * Total count of partitions inserted by the server during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return The totalPartitionsInsertedCount. + */ + @java.lang.Override + public long getTotalPartitionsInsertedCount() { + return totalPartitionsInsertedCount_; + } + + /** + * + * + *
+     * Total count of partitions inserted by the server during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @param value The totalPartitionsInsertedCount to set. + * @return This builder for chaining. + */ + public Builder setTotalPartitionsInsertedCount(long value) { + + totalPartitionsInsertedCount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Total count of partitions inserted by the server during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return This builder for chaining. + */ + public Builder clearTotalPartitionsInsertedCount() { + bitField0_ = (bitField0_ & ~0x00000002); + totalPartitionsInsertedCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..dc0403ac08e9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface StreamMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Total count of partitions streamed by the client during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return The totalPartitionsStreamedCount. + */ + long getTotalPartitionsStreamedCount(); + + /** + * + * + *
+   * Total count of partitions inserted by the server during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return The totalPartitionsInsertedCount. + */ + long getTotalPartitionsInsertedCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java new file mode 100644 index 000000000000..ac01f24a66a4 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java @@ -0,0 +1,217 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1alpha; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class TableName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/datasets/{dataset}/tables/{table}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + + @Deprecated + protected TableName() { + project = null; + dataset = null; + table = null; + } + + private TableName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static TableName of(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build(); + } + + public static String format(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build().toString(); + } + + public static TableName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE.validatedMatch( + formattedString, "TableName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("dataset"), matchMap.get("table")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : 
formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (TableName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE.instantiate( + "project", project, "dataset", dataset, "table", table); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + TableName that = ((TableName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + private Builder(TableName tableName) { + this.project = tableName.project; + this.dataset = tableName.dataset; + this.table = tableName.table; + } + + public TableName build() { + return new TableName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java new file mode 100644 index 000000000000..d4f469d8f379 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java @@ -0,0 +1,1079 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +/** + * + * + *
+ * Request message for UpdateMetastorePartition.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest} + */ +public final class UpdateMetastorePartitionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) + UpdateMetastorePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use UpdateMetastorePartitionRequest.newBuilder() to construct. + private UpdateMetastorePartitionRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private UpdateMetastorePartitionRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new UpdateMetastorePartitionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + .class); + } + + private int bitField0_; + public static final int METASTORE_PARTITION_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1alpha.MetastorePartition metastorePartition_; + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + @java.lang.Override + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartition() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getMetastorePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getMetastorePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest other = + (com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) obj; + + if (hasMetastorePartition() != other.hasMetastorePartition()) return false; + if (hasMetastorePartition()) { + if (!getMetastorePartition().equals(other.getMetastorePartition())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMetastorePartition()) { + hash = (37 * hash) + METASTORE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getMetastorePartition().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for UpdateMetastorePartition.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getMetastorePartitionFieldBuilder(); + getUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1alpha_UpdateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest build() { + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest result = + new com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ 
= 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.metastorePartition_ = + metastorePartitionBuilder_ == null + ? metastorePartition_ + : metastorePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + .getDefaultInstance()) return this; + if (other.hasMetastorePartition()) { + mergeMetastorePartition(other.getMetastorePartition()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getMetastorePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1alpha.MetastorePartition metastorePartition_; + private 
com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + metastorePartitionBuilder_; + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartition() { + if (metastorePartitionBuilder_ == null) { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } else { + return metastorePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metastorePartition_ = value; + } else { + metastorePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder builderForValue) { + if (metastorePartitionBuilder_ == null) { + metastorePartition_ = builderForValue.build(); + } else { + metastorePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeMetastorePartition( + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && metastorePartition_ != null + && metastorePartition_ + != com.google.cloud.bigquery.storage.v1alpha.MetastorePartition + .getDefaultInstance()) { + getMetastorePartitionBuilder().mergeFrom(value); + } else { + metastorePartition_ = value; + } + } else { + metastorePartitionBuilder_.mergeFrom(value); + } + if (metastorePartition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearMetastorePartition() { + bitField0_ = (bitField0_ & ~0x00000001); + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder + getMetastorePartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getMetastorePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + if (metastorePartitionBuilder_ != null) { + return metastorePartitionBuilder_.getMessageOrBuilder(); + } else { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder> + getMetastorePartitionFieldBuilder() { + if (metastorePartitionBuilder_ == null) { + metastorePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder>( + getMetastorePartition(), getParentForChildren(), isClean()); + metastorePartition_ = null; + } + return metastorePartitionBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) + private static final com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateMetastorePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..94714688c470 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1alpha; + +public interface UpdateMetastorePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + boolean hasMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartition getMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/proto/google/cloud/bigquery/storage/v1alpha/metastore_partition.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/proto/google/cloud/bigquery/storage/v1alpha/metastore_partition.proto new file mode 100644 index 000000000000..68ba61b6b1d9 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/proto/google/cloud/bigquery/storage/v1alpha/metastore_partition.proto @@ -0,0 +1,311 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1alpha; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1alpha/partition.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1Alpha"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1alpha/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "MetastorePartitionServiceProto"; +option java_package = "com.google.cloud.bigquery.storage.v1alpha"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1alpha"; +option (google.api.resource_definition) = { + type: "bigquery.googleapis.com/Table" + pattern: "projects/{project}/datasets/{dataset}/tables/{table}" +}; + +// BigQuery Metastore Partition Service API. +// This service is used for managing metastore partitions in BigQuery +// metastore. The service supports only batch operations for write. +service MetastorePartitionService { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform"; + + // Adds metastore partitions to a table. + rpc BatchCreateMetastorePartitions(BatchCreateMetastorePartitionsRequest) + returns (BatchCreateMetastorePartitionsResponse) { + option (google.api.http) = { + post: "/v1alpha/{parent=projects/*/datasets/*/tables/*}/partitions:batchCreate" + body: "*" + }; + } + + // Deletes metastore partitions from a table. + rpc BatchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1alpha/{parent=projects/*/datasets/*/tables/*}/partitions:batchDelete" + body: "*" + }; + } + + // Updates metastore partitions in a table. 
+  rpc BatchUpdateMetastorePartitions(BatchUpdateMetastorePartitionsRequest)
+      returns (BatchUpdateMetastorePartitionsResponse) {
+    option (google.api.http) = {
+      post: "/v1alpha/{parent=projects/*/datasets/*/tables/*}/partitions:batchUpdate"
+      body: "*"
+    };
+  }
+
+  // Gets metastore partitions from a table.
+  rpc ListMetastorePartitions(ListMetastorePartitionsRequest)
+      returns (ListMetastorePartitionsResponse) {
+    option (google.api.http) = {
+      get: "/v1alpha/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:list"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // This is a bi-di streaming rpc method that allows the client to send
+  // a stream of partitions and commit all of them atomically at the end.
+  // If the commit is successful, the server will return a
+  // response and close the stream. If the commit fails (due to duplicate
+  // partitions or another reason), the server will close the stream with an
+  // error. This method is only available via the gRPC API (not REST).
+  rpc StreamMetastorePartitions(stream StreamMetastorePartitionsRequest)
+      returns (stream StreamMetastorePartitionsResponse) {}
+}
+
+// Request message for CreateMetastorePartition. The MetastorePartition is
+// uniquely identified by values, which is an ordered list. Hence, there is no
+// separate name or partition id field.
+message CreateMetastorePartitionRequest {
+  // Required. Reference to the table where the metastore partition is to be
+  // added, in the format of
+  // projects/{project}/databases/{databases}/tables/{table}.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Required. The metastore partition to be added.
+  MetastorePartition metastore_partition = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for BatchCreateMetastorePartitions.
+message BatchCreateMetastorePartitionsRequest {
+  // Required. Reference to the table where the metastore partitions are to be
+  // added, in the format of
+  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Required. Requests to add metastore partitions to the table.
+  repeated CreateMetastorePartitionRequest requests = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Mimics the ifNotExists flag in IMetaStoreClient
+  // add_partitions(..). If the flag is set to false, the server will return
+  // ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+  // the server will skip existing partitions and insert only the non-existing
+  // partitions. A maximum of 900 partitions can be inserted in a batch.
+  bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Optional trace id to be used for debugging. It is expected that
+  // the client sets the same `trace_id` for all the batches in the same
+  // operation, so that it is possible to tie together the logs for all the
+  // batches in the same operation. Limited to 256 characters. This is expected,
+  // but not required, to be globally unique.
+  string trace_id = 4 [(google.api.field_behavior) = OPTIONAL];
+}
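For orientation, here is a minimal sketch of how a batch-create request is assembled from these messages, using the builders generated by this PR (`TableName`, `BatchCreateMetastorePartitionsRequest`, and the standard protoc `addRequests`/`addValues` accessors); the project, dataset, table, and partition values are illustrative only.

    import com.google.cloud.bigquery.storage.v1alpha.BatchCreateMetastorePartitionsRequest;
    import com.google.cloud.bigquery.storage.v1alpha.CreateMetastorePartitionRequest;
    import com.google.cloud.bigquery.storage.v1alpha.MetastorePartition;
    import com.google.cloud.bigquery.storage.v1alpha.TableName;

    public class BatchCreateRequestSketch {
      public static void main(String[] args) {
        // Resource name assembled with the TableName helper added above.
        String parent = TableName.of("my-project", "my_dataset", "my_table").toString();

        // A partition is identified solely by its ordered list of key values.
        MetastorePartition partition =
            MetastorePartition.newBuilder().addValues("2024-01-01").build();

        // Up to 900 partitions per batch; skip_existing_partitions mirrors the
        // Hive ifNotExists flag.
        BatchCreateMetastorePartitionsRequest request =
            BatchCreateMetastorePartitionsRequest.newBuilder()
                .setParent(parent)
                .addRequests(
                    CreateMetastorePartitionRequest.newBuilder()
                        .setParent(parent)
                        .setMetastorePartition(partition))
                .setSkipExistingPartitions(true)
                .build();
        System.out.println(request);
      }
    }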
+// Response message for BatchCreateMetastorePartitions.
+message BatchCreateMetastorePartitionsResponse {
+  // The list of metastore partitions that have been created.
+  repeated MetastorePartition partitions = 1;
+}
+
+// Request message for BatchDeleteMetastorePartitions. The MetastorePartition is
+// uniquely identified by values, which is an ordered list. Hence, there is no
+// separate name or partition id field.
+message BatchDeleteMetastorePartitionsRequest {
+  // Required. Reference to the table to which these metastore partitions
+  // belong, in the format of
+  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Required. The list of metastore partitions (identified by their values) to
+  // be deleted. A maximum of 900 partitions can be deleted in a batch.
+  repeated MetastorePartitionValues partition_values = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Optional trace id to be used for debugging. It is expected that
+  // the client sets the same `trace_id` for all the batches in the same
+  // operation, so that it is possible to tie together the logs for all the
+  // batches in the same operation. This is expected, but not required, to be
+  // globally unique.
+  string trace_id = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request message for UpdateMetastorePartition.
+message UpdateMetastorePartitionRequest {
+  // Required. The metastore partition to be updated.
+  MetastorePartition metastore_partition = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The list of fields to update.
+  google.protobuf.FieldMask update_mask = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
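To make the update-mask semantics concrete, a small sketch of building one element of a batch update, again assuming the generated classes from this PR. Standard FieldMask semantics apply (only the named paths are overwritten); the `"parameters"` path and the map entry below are purely illustrative.

    import com.google.cloud.bigquery.storage.v1alpha.MetastorePartition;
    import com.google.cloud.bigquery.storage.v1alpha.UpdateMetastorePartitionRequest;
    import com.google.protobuf.FieldMask;

    public class UpdateRequestSketch {
      public static void main(String[] args) {
        // The values list identifies the partition; parameters carry metadata.
        MetastorePartition partition =
            MetastorePartition.newBuilder()
                .addValues("2024-01-01")
                .putParameters("numRows", "42")
                .build();

        UpdateMetastorePartitionRequest request =
            UpdateMetastorePartitionRequest.newBuilder()
                .setMetastorePartition(partition)
                // Restrict the update to the parameters map (illustrative path).
                .setUpdateMask(FieldMask.newBuilder().addPaths("parameters"))
                .build();
        System.out.println(request);
      }
    }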
+// Request message for BatchUpdateMetastorePartitions.
+message BatchUpdateMetastorePartitionsRequest {
+  // Required. Reference to the table to which these metastore partitions
+  // belong, in the format of
+  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Required. Requests to update metastore partitions in the table.
+  repeated UpdateMetastorePartitionRequest requests = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Optional trace id to be used for debugging. It is expected that
+  // the client sets the same `trace_id` for all the batches in the same
+  // operation, so that it is possible to tie together the logs for all the
+  // batches in the same operation. This is expected, but not required, to be
+  // globally unique.
+  string trace_id = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Response message for BatchUpdateMetastorePartitions.
+message BatchUpdateMetastorePartitionsResponse {
+  // The list of metastore partitions that have been updated.
+  // A maximum of 900 partitions can be updated in a batch.
+  repeated MetastorePartition partitions = 1;
+}
+
+// Request message for ListMetastorePartitions.
+message ListMetastorePartitionsRequest {
+  // Required. Reference to the table to which these metastore partitions
+  // belong, in the format of
+  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Optional. SQL text filtering statement, similar to a WHERE clause in a
+  // query. Only supports single-row expressions. Aggregate functions are not
+  // supported.
+  //
+  // Examples: "int_field > 5"
+  //   "date_field = CAST('2014-9-27' as DATE)"
+  //   "nullable_field is not NULL"
+  //   "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+  //   "numeric_field BETWEEN 1.0 AND 5.0"
+  // Restricted to a maximum length of 1 MB.
+  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Optional trace id to be used for debugging. It is expected that
+  // the client sets the same `trace_id` for all the batches in the same
+  // operation, so that it is possible to tie together the logs for all the
+  // batches in the same operation. Limited to 256 characters. This is expected,
+  // but not required, to be globally unique.
+  string trace_id = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Response message for ListMetastorePartitions.
+message ListMetastorePartitionsResponse {
+  // The response depends on the number of metastore partitions to be returned;
+  // it can be a list of partitions or a list of
+  // [ReadStream](https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1#readstream)
+  // objects. In the second case, the BigQuery [Read API
+  // ReadRows](https://cloud.google.com/bigquery/docs/reference/storage#read_from_a_session_stream)
+  // method must be used to stream the data and convert it into a list of
+  // partitions.
+  oneof response {
+    // The list of partitions.
+    MetastorePartitionList partitions = 1;
+
+    // The list of streams.
+    StreamList streams = 2;
+  }
+}
+
+// The top-level message sent by the client to the
+// [Partitions.StreamMetastorePartitions][] method.
+// Follows the default gRPC streaming maximum size of 4 MB.
+message StreamMetastorePartitionsRequest {
+  // Required. Reference to the table where the partition is to be added, in
+  // the format of
+  // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Optional. A list of metastore partitions to be added to the table.
+  repeated MetastorePartition metastore_partitions = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Mimics the ifNotExists flag in IMetaStoreClient
+  // add_partitions(..). If the flag is set to false, the server will return
+  // ALREADY_EXISTS on commit if any partition already exists. If the flag is
+  // set to true:
+  // 1) the server will skip existing partitions and insert only the
+  //    non-existing partitions as part of the commit.
+  // 2) The client must set the `skip_existing_partitions` field to true for
+  //    all requests in the stream.
+  bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// This is the response message sent by the server
+// to the client for the [Partitions.StreamMetastorePartitions][] method when
+// the commit is successful. The server will close the stream after sending
+// this message.
+message StreamMetastorePartitionsResponse {
+  // Total count of partitions streamed by the client during the lifetime of the
+  // stream. This is only set in the final response message before closing the
+  // stream.
+  int64 total_partitions_streamed_count = 2;
+
+  // Total count of partitions inserted by the server during the lifetime of the
+  // stream. This is only set in the final response message before closing the
+  // stream.
+  int64 total_partitions_inserted_count = 3;
+}
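Because the stream commits atomically, every request message targets the same table and, when `skip_existing_partitions` is used, must carry the same flag value. A hedged sketch of chunking partitions into request messages follows; the surrounding GAPIC client and its bidi-streaming call are assumed rather than shown in this hunk, and the helper name and chunking policy are illustrative.

    import com.google.cloud.bigquery.storage.v1alpha.MetastorePartition;
    import com.google.cloud.bigquery.storage.v1alpha.StreamMetastorePartitionsRequest;
    import java.util.List;

    public class StreamRequestSketch {
      // Builds one message of the request stream. Each message must stay under
      // the default 4 MB gRPC limit, so large partition sets are split across
      // several messages sent on the same stream.
      static StreamMetastorePartitionsRequest chunk(
          String parent, List<MetastorePartition> partitions) {
        return StreamMetastorePartitionsRequest.newBuilder()
            .setParent(parent)
            .addAllMetastorePartitions(partitions)
            // Must hold the same value on every request in the stream.
            .setSkipExistingPartitions(true)
            .build();
      }
    }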
+// The error can be attached as error details in the returned rpc Status for +// more structured error handling in the client. +message BatchSizeTooLargeError { + // The maximum number of items that are supported in a single batch. This is + // returned as a hint to the client to adjust the batch size. + int64 max_batch_size = 1; + + // Optional. The error message that is returned to the client. + string error_message = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/proto/google/cloud/bigquery/storage/v1alpha/partition.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/proto/google/cloud/bigquery/storage/v1alpha/partition.proto new file mode 100644 index 000000000000..7e9c332db6f2 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1alpha/src/main/proto/google/cloud/bigquery/storage/v1alpha/partition.proto @@ -0,0 +1,140 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1alpha; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1Alpha"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1alpha/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "MetastorePartitionProto"; +option java_package = "com.google.cloud.bigquery.storage.v1alpha"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1alpha"; + +// Schema description of a metastore partition column. +message FieldSchema { + // Required. The name of the column. + // The maximum length of the name is 1024 characters + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The type of the metastore partition column. Maximum allowed + // length is 1024 characters. + string type = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Contains information about the physical storage of the data in the metastore +// partition. +message StorageDescriptor { + // Optional. The physical location of the metastore partition + // (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or + // `gs://spark-dataproc-data/pangea-data/*`). + string location_uri = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the fully qualified class name of the InputFormat + // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"). + // The maximum length is 128 characters. + string input_format = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the fully qualified class name of the OutputFormat + // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). + // The maximum length is 128 characters. + string output_format = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Serializer and deserializer information. 
+ SerDeInfo serde_info = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Serializer and deserializer information. +message SerDeInfo { + // Optional. Name of the SerDe. + // The maximum length is 256 characters. + string name = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Specifies a fully-qualified class name of the serialization + // library that is responsible for the translation of data between table + // representation and the underlying low-level input and output format + // structures. The maximum length is 256 characters. + string serialization_library = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Key-value pairs that define the initialization parameters for the + // serialization library. + // Maximum size 10 Kib. + map parameters = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information about a Hive partition. +message MetastorePartition { + // Required. Represents the values of the partition keys, where each value + // corresponds to a specific partition key in the order in which the keys are + // defined. Each value is limited to 1024 characters. + repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The creation time of the partition. + google.protobuf.Timestamp create_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Contains information about the physical storage of the data in + // the partition. + StorageDescriptor storage_descriptor = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional parameters or metadata associated with the partition. + // Maximum size 10 KiB. + map parameters = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. List of columns. + repeated FieldSchema fields = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// List of metastore partitions. +message MetastorePartitionList { + // Required. List of partitions. + repeated MetastorePartition partitions = 1 + [(google.api.field_behavior) = REQUIRED]; +} + +// Information about a single stream that is used to read partitions. +message ReadStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadStream" + pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}" + plural: "readStreams" + singular: "readStream" + }; + + // Output only. Identifier. Name of the stream, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. + string name = 1 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.field_behavior) = IDENTIFIER + ]; +} + +// List of streams. +message StreamList { + // Output only. List of streams. + repeated ReadStream streams = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Represents the values of a metastore partition. +message MetastorePartitionValues { + // Required. The values of the partition keys, where each value corresponds to + // a specific partition key in the order in which the keys are defined. 
+ repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/clirr-ignored-differences.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/clirr-ignored-differences.xml new file mode 100644 index 000000000000..70e35c15384c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/clirr-ignored-differences.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/pom.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/pom.xml new file mode 100644 index 000000000000..4df46f4cb7ec --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/pom.xml @@ -0,0 +1,41 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta + 3.19.1 + proto-google-cloud-bigquerystorage-v1beta + Proto library for google-cloud-bigquerystorage + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java new file mode 100644 index 000000000000..a4be7c32cbd5 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java @@ -0,0 +1,1619 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Request message for BatchCreateMetastorePartitions.
+ * 
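 * For illustration, a minimal construction sketch; every builder method used
 * here is defined on this class, and getDefaultInstance() stands in for a
 * fully populated per-partition sub-request:
 *
 *   BatchCreateMetastorePartitionsRequest request =
 *       BatchCreateMetastorePartitionsRequest.newBuilder()
 *           .setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table")
 *           .addRequests(CreateMetastorePartitionRequest.getDefaultInstance())
 *           .setSkipExistingPartitions(true)
 *           .setTraceId("my-batch-trace")
 *           .build();
 *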
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest} + */ +public final class BatchCreateMetastorePartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) + BatchCreateMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCreateMetastorePartitionsRequest.newBuilder() to construct. + private BatchCreateMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCreateMetastorePartitionsRequest() { + parent_ = ""; + requests_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCreateMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partitions are to be
+   * added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partitions are to be
+   * added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUESTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + requests_; + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getRequestsList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getRequestsCount() { + return requests_.size(); + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest getRequests( + int index) { + return requests_.get(index); + } + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + return requests_.get(index); + } + + public static final int SKIP_EXISTING_PARTITIONS_FIELD_NUMBER = 3; + private boolean skipExistingPartitions_ = false; + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+   * the server will skip existing partitions and insert only the non-existing
+   * partitions. A maximum of 900 partitions can be inserted in a batch.
+   * 
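   *
   * For illustration, with this flag left false a caller can detect the
   * duplicate-partition case; `stub` is an assumed plain gRPC blocking stub
   * for this service, while the status types are standard io.grpc classes:
   *
   *   try {
   *     stub.batchCreateMetastorePartitions(request);
   *   } catch (io.grpc.StatusRuntimeException e) {
   *     if (e.getStatus().getCode() == io.grpc.Status.Code.ALREADY_EXISTS) {
   *       // at least one partition in the batch already existed
   *     }
   *   }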
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + public static final int TRACE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. Optional trace id to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs of all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Optional trace id to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs of all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
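   *
   * For illustration, the standard protobuf byte round-trip applies to this
   * message; toByteArray() comes from the protobuf runtime, parseFrom(byte[])
   * is defined on this class, and the checked
   * InvalidProtocolBufferException is omitted from the sketch:
   *
   *   byte[] bytes = request.toByteArray();
   *   BatchCreateMetastorePartitionsRequest parsed =
   *       BatchCreateMetastorePartitionsRequest.parseFrom(bytes);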
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + output.writeMessage(2, requests_.get(i)); + } + if (skipExistingPartitions_ != false) { + output.writeBool(3, skipExistingPartitions_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, requests_.get(i)); + } + if (skipExistingPartitions_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, skipExistingPartitions_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getRequestsList().equals(other.getRequestsList())) return false; + if (getSkipExistingPartitions() != other.getSkipExistingPartitions()) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getRequestsCount() > 0) { + hash = (37 * hash) + REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getRequestsList().hashCode(); + } + hash = (37 * hash) + SKIP_EXISTING_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean(getSkipExistingPartitions()); + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchCreateMetastorePartitions.
+   * 
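   *
   * For illustration, an existing message can be copied and adjusted through
   * this builder via toBuilder(), e.g. to reissue a batch under a new trace
   * id:
   *
   *   BatchCreateMetastorePartitionsRequest retried =
   *       request.toBuilder().setTraceId("my-batch-trace-retry").build();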
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + } else { + requests_ = null; + requestsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipExistingPartitions_ = false; + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest result) { + if (requestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + requests_ = java.util.Collections.unmodifiableList(requests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.requests_ = 
requests_; + } else { + result.requests_ = requestsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.skipExistingPartitions_ = skipExistingPartitions_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (requestsBuilder_ == null) { + if (!other.requests_.isEmpty()) { + if (requests_.isEmpty()) { + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRequestsIsMutable(); + requests_.addAll(other.requests_); + } + onChanged(); + } + } else { + if (!other.requests_.isEmpty()) { + if (requestsBuilder_.isEmpty()) { + requestsBuilder_.dispose(); + requestsBuilder_ = null; + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + requestsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getRequestsFieldBuilder() + : null; + } else { + requestsBuilder_.addAllMessages(other.requests_); + } + } + } + if (other.getSkipExistingPartitions() != false) { + setSkipExistingPartitions(other.getSkipExistingPartitions()); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + .parser(), + extensionRegistry); + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(m); + } else { + requestsBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + skipExistingPartitions_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the metastore partitions are to be
+     * added, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List + requests_ = java.util.Collections.emptyList(); + + private void ensureRequestsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + requests_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest>( + requests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder> + requestsBuilder_; + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsList() { + if (requestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(requests_); + } else { + return requestsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getRequestsCount() { + if (requestsBuilder_ == null) { + return requests_.size(); + } else { + return requestsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest getRequests( + int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.set(index, value); + onChanged(); + } else { + requestsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.set(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(value); + onChanged(); + } else { + requestsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(index, value); + onChanged(); + } else { + requestsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllRequests( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest> + values) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, requests_); + onChanged(); + } else { + requestsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearRequests() { + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + requestsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeRequests(int index) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.remove(index); + onChanged(); + } else { + requestsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + getRequestsBuilder(int index) { + return getRequestsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + if (requestsBuilder_ != null) { + return requestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(requests_); + } + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + addRequestsBuilder() { + return getRequestsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + addRequestsBuilder(int index) { + return getRequestsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to add metastore partitions to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder> + getRequestsBuilderList() { + return getRequestsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder> + getRequestsFieldBuilder() { + if (requestsBuilder_ == null) { + requestsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder>( + requests_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + requests_ = null; + } + return requestsBuilder_; + } + + private boolean skipExistingPartitions_; + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+     * the server will skip existing partitions and insert only the non-existing
+     * partitions. A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+     * the server will skip existing partitions and insert only the non-existing
+     * partitions. A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The skipExistingPartitions to set. + * @return This builder for chaining. + */ + public Builder setSkipExistingPartitions(boolean value) { + + skipExistingPartitions_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+     * the server will skip existing partitions and insert only the non-existing
+     * partitions. A maximum of 900 partitions can be inserted in a batch.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSkipExistingPartitions() { + bitField0_ = (bitField0_ & ~0x00000004); + skipExistingPartitions_ = false; + onChanged(); + return this; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. Optional trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs of all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Optional trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs of all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Optional trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs of all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Optional trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs of all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Optional trace id to be used for debugging. It is expected that
+     * the client sets the same `trace_id` for all the batches in the same
+     * operation, so that it is possible to tie together the logs of all the
+     * batches in the same operation. Limited to 256 characters. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1beta + .BatchCreateMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..5b0b7805d0a1 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,181 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface BatchCreateMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partitions are to
+   * be added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
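// Editor's sketch, not part of the generated file: a parent string in the
// documented format might be assembled like this; the project, location,
// dataset and table names below are placeholders.
String parent =
    String.format(
        "projects/%s/locations/%s/datasets/%s/tables/%s",
        "my-project", "us", "my_dataset", "my_table");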
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which the metastore partitions are to
+   * be added, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getRequestsList(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest getRequests(int index); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getRequestsCount(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList(); + + /** + * + * + *
+   * Required. Requests to add metastore partitions to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index); + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS if any partition already exists. If the flag is set to true,
+   * the server will skip existing partitions and insert only the non-existing
+   * partitions. A maximum of 900 partitions can be inserted in a batch.
+   * 
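// Editor's sketch, not part of the generated file: with
// skip_existing_partitions set to true, the server skips partitions that
// already exist instead of failing the whole batch with ALREADY_EXISTS.
// `parent` and `createRequests` are assumed to be built elsewhere.
BatchCreateMetastorePartitionsRequest request =
    BatchCreateMetastorePartitionsRequest.newBuilder()
        .setParent(parent)
        .addAllRequests(createRequests) // at most 900 partitions per batch
        .setSkipExistingPartitions(true)
        .build();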
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + boolean getSkipExistingPartitions(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs of those batches can be tied together. Limited to 256
+   * characters. This is expected, but not required, to be globally unique.
+   * 
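// Editor's sketch, not part of the generated file: reusing one trace id
// across all batches of a logical operation lets the server-side logs be
// correlated. A random UUID (36 characters) stays well under the
// 256-character limit; `requestBuilder` is an assumed prebuilt builder.
String traceId = java.util.UUID.randomUUID().toString();
BatchCreateMetastorePartitionsRequest batch =
    requestBuilder.setTraceId(traceId).build();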
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs of those batches can be tied together. Limited to 256
+   * characters. This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java new file mode 100644 index 000000000000..87a6f468079e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java @@ -0,0 +1,1031 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Response message for BatchCreateMetastorePartitions.
+ * 
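// Editor's sketch, not part of the generated file: the response lists the
// partitions the server created; `response` is assumed to come from a
// BatchCreateMetastorePartitions call, with types from this v1beta package.
for (MetastorePartition partition : response.getPartitionsList()) {
  System.out.println(partition);
}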
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse} + */ +public final class BatchCreateMetastorePartitionsResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) + BatchCreateMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCreateMetastorePartitionsResponse.newBuilder() to construct. + private BatchCreateMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCreateMetastorePartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCreateMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse.Builder + .class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BatchCreateMetastorePartitions.
+   * 
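// Editor's sketch, not part of the generated file: like any generated
// message, the response round-trips through its binary wire format.
// parseFrom declares InvalidProtocolBufferException, handled by the caller.
byte[] bytes = response.toByteArray();
BatchCreateMetastorePartitionsResponse parsed =
    BatchCreateMetastorePartitionsResponse.parseFrom(bytes);
// parsed.equals(response) holds: value semantics come from the generated
// equals()/hashCode() shown in this file.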
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse build() { + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ 
= partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + .getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.parser(), + extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + partitions_ = java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList( + partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addAllPartitions( + java.lang.Iterable + values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder getPartitionsBuilder( + int index) { + return getPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + addPartitionsBuilder() { + return getPartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder addPartitionsBuilder( + int index) { + return getPartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been created.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsBuilderList() { + return getPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1beta + .BatchCreateMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..553318a77cb6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface BatchCreateMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.BatchCreateMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * The list of metastore partitions that have been created.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder getPartitionsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java new file mode 100644 index 000000000000..3e868c3dd936 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java @@ -0,0 +1,1529 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Request message for BatchDeleteMetastorePartitions. A MetastorePartition is
+ * uniquely identified by its values, which form an ordered list; hence there
+ * is no separate name or partition id field.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest} + */ +public final class BatchDeleteMetastorePartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) + BatchDeleteMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchDeleteMetastorePartitionsRequest.newBuilder() to construct. + private BatchDeleteMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchDeleteMetastorePartitionsRequest() { + parent_ = ""; + partitionValues_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchDeleteMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + partitionValues_; + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
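// Editor's sketch, not part of the generated file: partitions are deleted by
// their ordered values. This assumes MetastorePartitionValues exposes the
// usual repeated-string builder; `parent` is a placeholder table reference.
BatchDeleteMetastorePartitionsRequest request =
    BatchDeleteMetastorePartitionsRequest.newBuilder()
        .setParent(parent)
        .addPartitionValues(
            MetastorePartitionValues.newBuilder()
                .addValues("2024")
                .addValues("01")
                .build()) // at most 900 partitions per batch
        .build();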
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getPartitionValuesList() { + return partitionValues_; + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder> + getPartitionValuesOrBuilderList() { + return partitionValues_; + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getPartitionValuesCount() { + return partitionValues_.size(); + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues getPartitionValues( + int index) { + return partitionValues_.get(index); + } + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index) { + return partitionValues_.get(index); + } + + public static final int TRACE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs of those batches can be tied together. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs of those batches can be tied together. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < partitionValues_.size(); i++) { + output.writeMessage(2, partitionValues_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < partitionValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, partitionValues_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getPartitionValuesList().equals(other.getPartitionValuesList())) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getPartitionValuesCount() > 0) { + hash = (37 * hash) + PARTITION_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getPartitionValuesList().hashCode(); + } + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static 
Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchDeleteMetastorePartitions. A MetastorePartition is
+   * uniquely identified by its values, which form an ordered list; hence there
+   * is no separate name or partition id field.
+   * 
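// Editor's sketch, not part of the generated file: mergeFrom(other) copies
// scalar fields that are set on `other` and appends its repeated
// partition_values, as implemented in the generated mergeFrom below.
BatchDeleteMetastorePartitionsRequest merged =
    BatchDeleteMetastorePartitionsRequest.newBuilder()
        .mergeFrom(firstBatch)  // firstBatch/secondBatch are assumed prebuilt
        .mergeFrom(secondBatch) // partition_values lists are concatenated
        .build();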
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (partitionValuesBuilder_ == null) { + partitionValues_ = java.util.Collections.emptyList(); + } else { + partitionValues_ = null; + partitionValuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest result) { + if (partitionValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + partitionValues_ = java.util.Collections.unmodifiableList(partitionValues_); + bitField0_ = (bitField0_ & ~0x00000002); + } + 
result.partitionValues_ = partitionValues_; + } else { + result.partitionValues_ = partitionValuesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (partitionValuesBuilder_ == null) { + if (!other.partitionValues_.isEmpty()) { + if (partitionValues_.isEmpty()) { + partitionValues_ = other.partitionValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePartitionValuesIsMutable(); + partitionValues_.addAll(other.partitionValues_); + } + onChanged(); + } + } else { + if (!other.partitionValues_.isEmpty()) { + if (partitionValuesBuilder_.isEmpty()) { + partitionValuesBuilder_.dispose(); + partitionValuesBuilder_ = null; + partitionValues_ = other.partitionValues_; + bitField0_ = (bitField0_ & ~0x00000002); + partitionValuesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionValuesFieldBuilder() + : null; + } else { + partitionValuesBuilder_.addAllMessages(other.partitionValues_); + } + } + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.parser(), + extensionRegistry); + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(m); + } else { + partitionValuesBuilder_.addMessage(m); + } + break; + } // case 18 + case 34: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
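+     *
+     * For example, with a hypothetical resource name (not taken from this
+     * file):
+     * <pre>{@code
+     * builder.setParent("projects/my-project/locations/us/datasets/my_dataset/tables/my_table");
+     * }</pre>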
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List + partitionValues_ = java.util.Collections.emptyList(); + + private void ensurePartitionValuesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + partitionValues_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues>( + partitionValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder> + partitionValuesBuilder_; + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
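+     *
+     * Because a batch holds at most 900 partitions, a client may need to chunk
+     * a larger delete. A minimal sketch, where {@code allValues} and
+     * {@code requestBuilder} are hypothetical client-side variables:
+     * <pre>{@code
+     * int maxBatch = 900;
+     * for (int i = 0; i < allValues.size(); i += maxBatch) {
+     *   java.util.List<MetastorePartitionValues> chunk =
+     *       allValues.subList(i, Math.min(i + maxBatch, allValues.size()));
+     *   // Reset the repeated field, then add this chunk and send one request.
+     *   requestBuilder.clearPartitionValues().addAllPartitionValues(chunk);
+     * }
+     * }</pre>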
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionValuesList() { + if (partitionValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitionValues_); + } else { + return partitionValuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getPartitionValuesCount() { + if (partitionValuesBuilder_ == null) { + return partitionValues_.size(); + } else { + return partitionValuesBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues getPartitionValues( + int index) { + if (partitionValuesBuilder_ == null) { + return partitionValues_.get(index); + } else { + return partitionValuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitionValues( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.set(index, value); + onChanged(); + } else { + partitionValuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitionValues( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.add(value); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues value) { + if (partitionValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionValuesIsMutable(); + partitionValues_.add(index, value); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitionValues( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder builderForValue) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllPartitionValues( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues> + values) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitionValues_); + onChanged(); + } else { + partitionValuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartitionValues() { + if (partitionValuesBuilder_ == null) { + partitionValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + partitionValuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removePartitionValues(int index) { + if (partitionValuesBuilder_ == null) { + ensurePartitionValuesIsMutable(); + partitionValues_.remove(index); + onChanged(); + } else { + partitionValuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder + getPartitionValuesBuilder(int index) { + return getPartitionValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index) { + if (partitionValuesBuilder_ == null) { + return partitionValues_.get(index); + } else { + return partitionValuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder> + getPartitionValuesOrBuilderList() { + if (partitionValuesBuilder_ != null) { + return partitionValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitionValues_); + } + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder + addPartitionValuesBuilder() { + return getPartitionValuesFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder + addPartitionValuesBuilder(int index) { + return getPartitionValuesFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. The list of metastore partitions (identified by their values) to be
+     * deleted. A maximum of 900 partitions can be deleted in a batch.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionValuesBuilderList() { + return getPartitionValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder> + getPartitionValuesFieldBuilder() { + if (partitionValuesBuilder_ == null) { + partitionValuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder>( + partitionValues_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + partitionValues_ = null; + } + return partitionValuesBuilder_; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. Trace ID used for debugging. The client is expected to set the
+     * same `trace_id` for all the batches in the same operation, so that the
+     * logs for all of those batches can be tied together. This is expected,
+     * but not required, to be globally unique.
+     * 
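+     *
+     * A minimal sketch, assuming a client-generated identifier:
+     * <pre>{@code
+     * // Reuse the same traceId for every batch of one logical operation.
+     * String traceId = java.util.UUID.randomUUID().toString();
+     * requestBuilder.setTraceId(traceId); // requestBuilder is hypothetical
+     * }</pre>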
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Trace ID used for debugging. The client is expected to set the
+     * same `trace_id` for all the batches in the same operation, so that the
+     * logs for all of those batches can be tied together. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Trace ID used for debugging. The client is expected to set the
+     * same `trace_id` for all the batches in the same operation, so that the
+     * logs for all of those batches can be tied together. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace ID used for debugging. The client is expected to set the
+     * same `trace_id` for all the batches in the same operation, so that the
+     * logs for all of those batches can be tied together. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace ID used for debugging. The client is expected to set the
+     * same `trace_id` for all the batches in the same operation, so that the
+     * logs for all of those batches can be tied together. This is expected,
+     * but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1beta + .BatchDeleteMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchDeleteMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..92e7ec7802cd --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface BatchDeleteMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.BatchDeleteMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getPartitionValuesList(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues getPartitionValues(int index); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getPartitionValuesCount(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder> + getPartitionValuesOrBuilderList(); + + /** + * + * + *
+   * Required. The list of metastore partitions (identified by their values) to be
+   * deleted. A maximum of 900 partitions can be deleted in a batch.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartitionValues partition_values = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder + getPartitionValuesOrBuilder(int index); + + /** + * + * + *
+   * Optional. Trace ID used for debugging. The client is expected to set the
+   * same `trace_id` for all the batches in the same operation, so that the
+   * logs for all of those batches can be tied together. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace ID used for debugging. The client is expected to set the
+   * same `trace_id` for all the batches in the same operation, so that the
+   * logs for all of those batches can be tied together. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java new file mode 100644 index 000000000000..5266081a10d6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java @@ -0,0 +1,747 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Structured custom error message for "batch size too large" errors.
+ * The error can be attached as error details in the returned rpc Status for
+ * more structured error handling in the client.
+ * 
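+ *
+ * A minimal sketch of reading this error back out of a returned
+ * {@code com.google.rpc.Status}; the {@code status} variable is a
+ * hypothetical value taken from a failed call:
+ * <pre>{@code
+ * for (com.google.protobuf.Any detail : status.getDetailsList()) {
+ *   if (detail.is(BatchSizeTooLargeError.class)) {
+ *     // Any.unpack(...) throws InvalidProtocolBufferException.
+ *     BatchSizeTooLargeError err = detail.unpack(BatchSizeTooLargeError.class);
+ *     long hint = err.getMaxBatchSize();
+ *   }
+ * }
+ * }</pre>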
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError} + */ +public final class BatchSizeTooLargeError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) + BatchSizeTooLargeErrorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchSizeTooLargeError.newBuilder() to construct. + private BatchSizeTooLargeError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchSizeTooLargeError() { + errorMessage_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchSizeTooLargeError(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.class, + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.Builder.class); + } + + public static final int MAX_BATCH_SIZE_FIELD_NUMBER = 1; + private long maxBatchSize_ = 0L; + + /** + * + * + *
+   * The maximum number of items that are supported in a single batch. This is
+   * returned as a hint to the client to adjust the batch size.
+   * 
+ * + * int64 max_batch_size = 1; + * + * @return The maxBatchSize. + */ + @java.lang.Override + public long getMaxBatchSize() { + return maxBatchSize_; + } + + public static final int ERROR_MESSAGE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object errorMessage_ = ""; + + /** + * + * + *
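+   *
+   * A client might use this hint to re-chunk and retry; a one-line sketch with
+   * hypothetical client-side variables:
+   * <pre>{@code
+   * int newBatchSize = (int) Math.min(error.getMaxBatchSize(), pending.size());
+   * }</pre>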
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The errorMessage. + */ + @java.lang.Override + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for errorMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (maxBatchSize_ != 0L) { + output.writeInt64(1, maxBatchSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, errorMessage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (maxBatchSize_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, maxBatchSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, errorMessage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError other = + (com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) obj; + + if (getMaxBatchSize() != other.getMaxBatchSize()) return false; + if (!getErrorMessage().equals(other.getErrorMessage())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAX_BATCH_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxBatchSize()); + hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getErrorMessage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Structured custom error message for "batch size too large" errors.
+   * The error can be attached as error details in the returned rpc Status for
+   * more structured error handling in the client.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.class, + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + maxBatchSize_ = 0L; + errorMessage_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError build() { + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError buildPartial() { + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError result = + new com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.maxBatchSize_ = maxBatchSize_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.errorMessage_ = errorMessage_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return 
super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError.getDefaultInstance()) + return this; + if (other.getMaxBatchSize() != 0L) { + setMaxBatchSize(other.getMaxBatchSize()); + } + if (!other.getErrorMessage().isEmpty()) { + errorMessage_ = other.errorMessage_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + maxBatchSize_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + errorMessage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long maxBatchSize_; + + /** + * + * + *
+     * The maximum number of items that are supported in a single batch. This is
+     * returned as a hint to the client to adjust the batch size.
+     * 
+ * + * int64 max_batch_size = 1; + * + * @return The maxBatchSize. + */ + @java.lang.Override + public long getMaxBatchSize() { + return maxBatchSize_; + } + + /** + * + * + *
+     * The maximum number of items that are supported in a single batch. This is
+     * returned as a hint to the client to adjust the batch size.
+     * 
+ * + * int64 max_batch_size = 1; + * + * @param value The maxBatchSize to set. + * @return This builder for chaining. + */ + public Builder setMaxBatchSize(long value) { + + maxBatchSize_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The maximum number of items that are supported in a single batch. This is
+     * returned as a hint to the client to adjust the batch size.
+     * 
+ * + * int64 max_batch_size = 1; + * + * @return This builder for chaining. + */ + public Builder clearMaxBatchSize() { + bitField0_ = (bitField0_ & ~0x00000001); + maxBatchSize_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The errorMessage. + */ + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for errorMessage. + */ + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + errorMessage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearErrorMessage() { + errorMessage_ = getDefaultInstance().getErrorMessage(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The error message that is returned to the client.
+     * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + errorMessage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) + private static final com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError(); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchSizeTooLargeError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java new file mode 100644 index 000000000000..f5f10487fcf5 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java @@ -0,0 +1,66 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface BatchSizeTooLargeErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.BatchSizeTooLargeError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The maximum number of items that are supported in a single batch. This is
+   * returned as a hint to the client to adjust the batch size.
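+   *
+   * A minimal client-side sketch of honoring the hint; the surrounding
+   * error-handling code is hypothetical and not part of the generated API:
+   *
+   *   long hint = error.getMaxBatchSize();
+   *   // re-split pending items into batches of at most `hint` entries
+   *   // and resubmit each batch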
+   * 
+ * + * int64 max_batch_size = 1; + * + * @return The maxBatchSize. + */ + long getMaxBatchSize(); + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The errorMessage. + */ + java.lang.String getErrorMessage(); + + /** + * + * + *
+   * Optional. The error message that is returned to the client.
+   * 
+ * + * string error_message = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for errorMessage. + */ + com.google.protobuf.ByteString getErrorMessageBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java new file mode 100644 index 000000000000..eced1f331c88 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java @@ -0,0 +1,1506 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Request message for BatchUpdateMetastorePartitions.
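+ *
+ * A minimal construction sketch (the resource names and trace id below are
+ * placeholders):
+ *
+ *   BatchUpdateMetastorePartitionsRequest request =
+ *       BatchUpdateMetastorePartitionsRequest.newBuilder()
+ *           .setParent("projects/p/locations/l/datasets/d/tables/t")
+ *           .addRequests(UpdateMetastorePartitionRequest.newBuilder().build())
+ *           .setTraceId("my-trace")
+ *           .build();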
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest} + */ +public final class BatchUpdateMetastorePartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) + BatchUpdateMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchUpdateMetastorePartitionsRequest.newBuilder() to construct. + private BatchUpdateMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchUpdateMetastorePartitionsRequest() { + parent_ = ""; + requests_ = java.util.Collections.emptyList(); + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchUpdateMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
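+   *
+   * For example (identifiers are placeholders):
+   *
+   *   projects/my-project/locations/us/datasets/my_dataset/tables/my_table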
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUESTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + requests_; + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
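+   *
+   * A sketch of adding one entry through the enclosing message builder;
+   * population of the update itself is omitted and shown here only for shape:
+   *
+   *   builder.addRequests(
+   *       UpdateMetastorePartitionRequest.newBuilder()
+   *           // ...describe the partition to update...
+   *           .build());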
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getRequestsList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + return requests_; + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getRequestsCount() { + return requests_.size(); + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest getRequests( + int index) { + return requests_.get(index); + } + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + return requests_.get(index); + } + + public static final int TRACE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. A trace id used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that
+   * the logs for all the batches in that operation can be tied together.
+   * This is expected, but not required, to be globally unique.
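+   *
+   * For example, a client splitting one logical update into several batches
+   * could generate one id up front and reuse it (the id scheme below is
+   * illustrative only):
+   *
+   *   String traceId = java.util.UUID.randomUUID().toString();
+   *   // call setTraceId(traceId) on every batch request in the operation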
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. A trace id used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that
+   * the logs for all the batches in that operation can be tied together.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + output.writeMessage(2, requests_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < requests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, requests_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getRequestsList().equals(other.getRequestsList())) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getRequestsCount() > 0) { + hash = (37 * hash) + REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getRequestsList().hashCode(); + } + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + 
} + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for BatchUpdateMetastorePartitions.
+   * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + } else { + requests_ = null; + requestsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest result) { + if (requestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + requests_ = java.util.Collections.unmodifiableList(requests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.requests_ = requests_; + } else { + result.requests_ = 
requestsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (requestsBuilder_ == null) { + if (!other.requests_.isEmpty()) { + if (requests_.isEmpty()) { + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRequestsIsMutable(); + requests_.addAll(other.requests_); + } + onChanged(); + } + } else { + if (!other.requests_.isEmpty()) { + if (requestsBuilder_.isEmpty()) { + requestsBuilder_.dispose(); + requestsBuilder_ = null; + requests_ = other.requests_; + bitField0_ = (bitField0_ & ~0x00000002); + requestsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getRequestsFieldBuilder() + : null; + } else { + requestsBuilder_.addAllMessages(other.requests_); + } + } + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + .parser(), + extensionRegistry); + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(m); + } else { + requestsBuilder_.addMessage(m); + } + break; + } // case 18 + case 34: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List + requests_ = java.util.Collections.emptyList(); + + private void ensureRequestsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + requests_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest>( + requests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder> + requestsBuilder_; + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getRequestsList() { + if (requestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(requests_); + } else { + return requestsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getRequestsCount() { + if (requestsBuilder_ == null) { + return requests_.size(); + } else { + return requestsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest getRequests( + int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.set(index, value); + onChanged(); + } else { + requestsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setRequests( + int index, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.set(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(value); + onChanged(); + } else { + requestsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest value) { + if (requestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRequestsIsMutable(); + requests_.add(index, value); + onChanged(); + } else { + requestsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addRequests( + int index, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + builderForValue) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.add(index, builderForValue.build()); + onChanged(); + } else { + requestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllRequests( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest> + values) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, requests_); + onChanged(); + } else { + requestsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearRequests() { + if (requestsBuilder_ == null) { + requests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + requestsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeRequests(int index) { + if (requestsBuilder_ == null) { + ensureRequestsIsMutable(); + requests_.remove(index); + onChanged(); + } else { + requestsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + getRequestsBuilder(int index) { + return getRequestsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index) { + if (requestsBuilder_ == null) { + return requests_.get(index); + } else { + return requestsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList() { + if (requestsBuilder_ != null) { + return requestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(requests_); + } + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + addRequestsBuilder() { + return getRequestsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + addRequestsBuilder(int index) { + return getRequestsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + .getDefaultInstance()); + } + + /** + * + * + *
+     * Required. Requests to update metastore partitions in the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder> + getRequestsBuilderList() { + return getRequestsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder> + getRequestsFieldBuilder() { + if (requestsBuilder_ == null) { + requestsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder>( + requests_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + requests_ = null; + } + return requestsBuilder_; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. A trace id used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that
+     * the logs for all the batches in that operation can be tied together.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. A trace id used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that
+     * the logs for all the batches in that operation can be tied together.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. A trace id used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that
+     * the logs for all the batches in that operation can be tied together.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A trace id used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that
+     * the logs for all the batches in that operation can be tied together.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. A trace id used for debugging. The client is expected to set
+     * the same `trace_id` for all the batches in the same operation, so that
+     * the logs for all the batches in that operation can be tied together.
+     * This is expected, but not required, to be globally unique.
+     * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1beta + .BatchUpdateMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchUpdateMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..ec207dfaf5f8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface BatchUpdateMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getRequestsList(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest getRequests(int index); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getRequestsCount(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List< + ? extends + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder> + getRequestsOrBuilderList(); + + /** + * + * + *
+   * Required. Requests to update metastore partitions in the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest requests = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder + getRequestsOrBuilder(int index); + + /** + * + * + *
+   * Optional. A trace id used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that
+   * the logs for all the batches in that operation can be tied together.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. A trace id used for debugging. The client is expected to set
+   * the same `trace_id` for all the batches in the same operation, so that
+   * the logs for all the batches in that operation can be tied together.
+   * This is expected, but not required, to be globally unique.
+   * 
+ * + * string trace_id = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java new file mode 100644 index 000000000000..307a08dd8dee --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java @@ -0,0 +1,1054 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Response message for BatchUpdateMetastorePartitions.
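+ *
+ * A minimal read sketch, assuming `response` was returned by the service:
+ *
+ *   for (MetastorePartition partition : response.getPartitionsList()) {
+ *     // inspect each updated partition
+ *   }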
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse} + */ +public final class BatchUpdateMetastorePartitionsResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) + BatchUpdateMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchUpdateMetastorePartitionsResponse.newBuilder() to construct. + private BatchUpdateMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchUpdateMetastorePartitionsResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchUpdateMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse.Builder + .class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for BatchUpdateMetastorePartitions.
+   * 
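+   *
+   * A minimal construction sketch (illustrative only; in practice this
+   * response is produced by the service rather than built by hand):
+   *
+   *   BatchUpdateMetastorePartitionsResponse response =
+   *       BatchUpdateMetastorePartitionsResponse.newBuilder()
+   *           .addPartitions(MetastorePartition.getDefaultInstance())
+   *           .build();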
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse build() { + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ 
= partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + .getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.parser(), + extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + partitions_ = java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList( + partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
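+     *
+     * Illustrative read-back (assumes a Builder named {@code builder}): the
+     * returned list is an unmodifiable view of the partitions added so far.
+     *
+     *   int pending = builder.getPartitionsList().size();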
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder setPartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addPartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder addAllPartitions( + java.lang.Iterable + values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
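+     *
+     * Illustrative reset sketch (assumes a Builder named {@code builder}):
+     *
+     *   builder.clearPartitions(); // drops every partition added so far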
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder getPartitionsBuilder( + int index) { + return getPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + addPartitionsBuilder() { + return getPartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder addPartitionsBuilder( + int index) { + return getPartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * The list of metastore partitions that have been updated.
+     * A maximum of 900 partitions can be updated in a batch.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + * + */ + public java.util.List + getPartitionsBuilderList() { + return getPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1beta + .BatchUpdateMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchUpdateMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..4f21f0e2c165 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,88 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface BatchUpdateMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.BatchUpdateMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
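+   *
+   * A minimal sketch of coding against this interface (illustrative only;
+   * the helper name is hypothetical):
+   *
+   *   static int countUpdated(BatchUpdateMetastorePartitionsResponseOrBuilder r) {
+   *     return r.getPartitionsCount();
+   *   }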
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * The list of metastore partitions that have been updated.
+   * A maximum of 900 partitions can be updated in a batch.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1; + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder getPartitionsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java new file mode 100644 index 000000000000..e9c85120134e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java @@ -0,0 +1,1011 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Request message for CreateMetastorePartition. A MetastorePartition is
+ * uniquely identified by its values field, which is an ordered list; hence,
+ * there is no separate name or partition id field.
+ * 
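+ *
+ * A minimal construction sketch (illustrative only; the resource names are
+ * placeholders):
+ *
+ *   CreateMetastorePartitionRequest request =
+ *       CreateMetastorePartitionRequest.newBuilder()
+ *           .setParent("projects/my-project/databases/my-db/tables/my-table")
+ *           .setMetastorePartition(MetastorePartition.getDefaultInstance())
+ *           .build();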
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest} + */ +public final class CreateMetastorePartitionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) + CreateMetastorePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateMetastorePartitionRequest.newBuilder() to construct. + private CreateMetastorePartitionRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateMetastorePartitionRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateMetastorePartitionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table where the metastore partition is to be
+   * added, in the format of
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table where the metastore partition is to be
+   * added, in the format of
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METASTORE_PARTITION_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta.MetastorePartition metastorePartition_; + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
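+   *
+   * Illustrative presence check (assumes a parsed request named
+   * {@code request}):
+   *
+   *   if (request.hasMetastorePartition()) {
+   *     MetastorePartition partition = request.getMetastorePartition();
+   *   }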
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + @java.lang.Override + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartition() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getMetastorePartition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getMetastorePartition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest other = + (com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasMetastorePartition() != other.hasMetastorePartition()) return false; + if (hasMetastorePartition()) { + if (!getMetastorePartition().equals(other.getMetastorePartition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasMetastorePartition()) { + hash = (37 * hash) + METASTORE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getMetastorePartition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for CreateMetastorePartition. A MetastorePartition is
+   * uniquely identified by its values field, which is an ordered list; hence,
+   * there is no separate name or partition id field.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getMetastorePartitionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest build() { + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest result = + new com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.metastorePartition_ 
= + metastorePartitionBuilder_ == null + ? metastorePartition_ + : metastorePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasMetastorePartition()) { + mergeMetastorePartition(other.getMetastorePartition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + getMetastorePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table where the metastore partition is to be
+     * added, in the format of
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partition is to be
+     * added, in the format of
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partition is to be
+     * added, in the format of
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partition is to be
+     * added, in the format of
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the metastore partition is to be
+     * added, in the format of
+     * projects/{project}/databases/{databases}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta.MetastorePartition metastorePartition_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + metastorePartitionBuilder_; + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
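+     *
+     * Illustrative builder check (assumes a Builder named {@code builder};
+     * the default-instance fallback is only an example):
+     *
+     *   if (!builder.hasMetastorePartition()) {
+     *     builder.setMetastorePartition(MetastorePartition.getDefaultInstance());
+     *   }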
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartition() { + if (metastorePartitionBuilder_ == null) { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } else { + return metastorePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metastorePartition_ = value; + } else { + metastorePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (metastorePartitionBuilder_ == null) { + metastorePartition_ = builderForValue.build(); + } else { + metastorePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeMetastorePartition( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && metastorePartition_ != null + && metastorePartition_ + != com.google.cloud.bigquery.storage.v1beta.MetastorePartition + .getDefaultInstance()) { + getMetastorePartitionBuilder().mergeFrom(value); + } else { + metastorePartition_ = value; + } + } else { + metastorePartitionBuilder_.mergeFrom(value); + } + if (metastorePartition_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearMetastorePartition() { + bitField0_ = (bitField0_ & ~0x00000002); + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + getMetastorePartitionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getMetastorePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + if (metastorePartitionBuilder_ != null) { + return metastorePartitionBuilder_.getMessageOrBuilder(); + } else { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + } + + /** + * + * + *
+     * Required. The metastore partition to be added.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getMetastorePartitionFieldBuilder() { + if (metastorePartitionBuilder_ == null) { + metastorePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder>( + getMetastorePartition(), getParentForChildren(), isClean()); + metastorePartition_ = null; + } + return metastorePartitionBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) + private static final com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateMetastorePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java new file mode 100644 index 
000000000000..7c10fb7c505b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface CreateMetastorePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table where the metastore partition is to be
+   * added, in the format of
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
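+   * Illustrative only (not part of the generated file): a minimal sketch of a
+   * well-formed request, assuming hypothetical resource IDs "my-project",
+   * "my-database" and "my-table"; MetastorePartition.getDefaultInstance()
+   * stands in for a fully populated partition:
+   *
+   *   CreateMetastorePartitionRequest request =
+   *       CreateMetastorePartitionRequest.newBuilder()
+   *           .setParent("projects/my-project/databases/my-database/tables/my-table")
+   *           .setMetastorePartition(MetastorePartition.getDefaultInstance())
+   *           .build();
+   *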
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table where the metastore partition is to be
+   * added, in the format of
+   * projects/{project}/databases/{databases}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + boolean hasMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be added.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java new file mode 100644 index 000000000000..b30839e145fc --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java @@ -0,0 +1,837 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Schema description of a metastore partition column.
+ * 
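+ * Illustrative only (not part of the generated file): a minimal sketch,
+ * assuming a hypothetical DATE-typed partition column named "event_date":
+ *
+ *   FieldSchema column =
+ *       FieldSchema.newBuilder().setName("event_date").setType("DATE").build();
+ *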
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.FieldSchema} + */ +public final class FieldSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.FieldSchema) + FieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FieldSchema.newBuilder() to construct. + private FieldSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FieldSchema() { + name_ = ""; + type_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FieldSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.FieldSchema.class, + com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, type_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, type_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.FieldSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.FieldSchema other = + (com.google.cloud.bigquery.storage.v1beta.FieldSchema) obj; + + if (!getName().equals(other.getName())) return false; + if (!getType().equals(other.getType())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta.FieldSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Schema description of a metastore partition column.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.FieldSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.FieldSchema) + com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.FieldSchema.class, + com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.FieldSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.FieldSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.FieldSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.FieldSchema build() { + com.google.cloud.bigquery.storage.v1beta.FieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.FieldSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta.FieldSchema result = + new com.google.cloud.bigquery.storage.v1beta.FieldSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta.FieldSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public 
Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.FieldSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.FieldSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta.FieldSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta.FieldSchema.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The name of the column.
+     * The maximum length of the name is 1024 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object type_ = ""; + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The type of the metastore partition column. Maximum allowed
+     * length is 1024 characters.
+     * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.FieldSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.FieldSchema) + private static final com.google.cloud.bigquery.storage.v1beta.FieldSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.FieldSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta.FieldSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.FieldSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java new file mode 100644 index 000000000000..592e3d4e830a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java @@ -0,0 +1,82 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface FieldSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.FieldSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The name of the column.
+   * The maximum length of the name is 1024 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
+   * Required. The type of the metastore partition column. Maximum allowed
+   * length is 1024 characters.
+   * 
+ * + * string type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java new file mode 100644 index 000000000000..9ad077c6dc59 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java @@ -0,0 +1,1162 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Request message for ListMetastorePartitions.
+ * 
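+ * Illustrative only (not part of the generated file): a minimal sketch,
+ * assuming hypothetical resource IDs and reusing a filter example documented
+ * on the filter field below:
+ *
+ *   ListMetastorePartitionsRequest request =
+ *       ListMetastorePartitionsRequest.newBuilder()
+ *           .setParent(
+ *               "projects/my-project/locations/us/datasets/my-dataset/tables/my-table")
+ *           .setFilter("int_field > 5")
+ *           .build();
+ *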
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest} + */ +public final class ListMetastorePartitionsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) + ListMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ListMetastorePartitionsRequest.newBuilder() to construct. + private ListMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListMetastorePartitionsRequest() { + parent_ = ""; + filter_ = ""; + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * "int_field > 5"
+   * * "date_field = CAST('2014-9-27' as DATE)"
+   * * "nullable_field is not NULL"
+   * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+   * * "numeric_field BETWEEN 1.0 AND 5.0"
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * "int_field > 5"
+   * * "date_field = CAST('2014-9-27' as DATE)"
+   * * "nullable_field is not NULL"
+   * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+   * * "numeric_field BETWEEN 1.0 AND 5.0"
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRACE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * Optional. Trace ID to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs to all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
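+   *
+   * Illustrative only: one plausible client-side scheme is to generate a
+   * single ID per logical operation and reuse it on every batch; a random
+   * UUID (36 characters) stays well under the 256-character limit:
+   *
+   *   String traceId = java.util.UUID.randomUUID().toString();
+   *   builder.setTraceId(traceId); // same value for each batch in the operation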
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Trace ID to be used for debugging. It is expected that
+   * the client sets the same `trace_id` for all the batches in the same
+   * operation, so that it is possible to tie together the logs to all the
+   * batches in the same operation. Limited to 256 characters. This is expected,
+   * but not required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, filter_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, filter_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for ListMetastorePartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + traceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.traceId_ = traceId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which these metastore partitions
+     * belong, in the format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * "int_field > 5"
+     * * "date_field = CAST('2014-9-27' as DATE)"
+     * * "nullable_field is not NULL"
+     * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     * * "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * "int_field > 5"
+     * * "date_field = CAST('2014-9-27' as DATE)"
+     * * "nullable_field is not NULL"
+     * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     * * "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * "int_field > 5"
+     * * "date_field = CAST('2014-9-27' as DATE)"
+     * * "nullable_field is not NULL"
+     * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     * * "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * "int_field > 5"
+     * * "date_field = CAST('2014-9-27' as DATE)"
+     * * "nullable_field is not NULL"
+     * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     * * "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in a
+     * query. Only supports single-row expressions.  Aggregate functions are not
+     * supported.
+     *
+     * Examples:
+     * * "int_field > 5"
+     * * "date_field = CAST('2014-9-27' as DATE)"
+     * * "nullable_field is not NULL"
+     * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     * * "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the operation can be tied
+     * together. Limited to 256 characters. This is expected, but not
+     * required, to be globally unique.
+     * 
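For example, one trace id might be generated per logical operation and reused on every batch request. A sketch only; the `builders` collection is a hypothetical stand-in for the operation's batch request builders:

    String traceId = java.util.UUID.randomUUID().toString(); // well under 256 characters
    for (ListMetastorePartitionsRequest.Builder builder : builders) {
      builder.setTraceId(traceId); // same trace id ties the batches' logs together
    }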
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the operation can be tied
+     * together. Limited to 256 characters. This is expected, but not
+     * required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the operation can be tied
+     * together. Limited to 256 characters. This is expected, but not
+     * required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the operation can be tied
+     * together. Limited to 256 characters. This is expected, but not
+     * required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Trace id to be used for debugging. The client is expected to
+     * set the same `trace_id` for all the batches in the same operation, so
+     * that the logs for all the batches in the operation can be tied
+     * together. Limited to 256 characters. This is expected, but not
+     * required, to be globally unique.
+     * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..b2a8e541e953 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,142 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface ListMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which these metastore partitions
+   * belong, in the format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * "int_field > 5"
+   * * "date_field = CAST('2014-9-27' as DATE)"
+   * * "nullable_field is not NULL"
+   * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+   * * "numeric_field BETWEEN 1.0 AND 5.0"
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
+   * Optional. SQL text filtering statement, similar to a WHERE clause in a
+   * query. Only supports single-row expressions.  Aggregate functions are not
+   * supported.
+   *
+   * Examples:
+   * * "int_field > 5"
+   * * "date_field = CAST('2014-9-27' as DATE)"
+   * * "nullable_field is not NULL"
+   * * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+   * * "numeric_field BETWEEN 1.0 AND 5.0"
+   *
+   * Restricted to a maximum length of 1 MB.
+   * 
+ * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all the batches in the operation can be tied
+   * together. Limited to 256 characters. This is expected, but not
+   * required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * Optional. Trace id to be used for debugging. The client is expected to
+   * set the same `trace_id` for all the batches in the same operation, so
+   * that the logs for all the batches in the operation can be tied
+   * together. Limited to 256 characters. This is expected, but not
+   * required, to be globally unique.
+   * 
+ * + * string trace_id = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java new file mode 100644 index 000000000000..d45229ad27cb --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java @@ -0,0 +1,1168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Response message for ListMetastorePartitions.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse} + */ +public final class ListMetastorePartitionsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) + ListMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ListMetastorePartitionsResponse.newBuilder() to construct. + private ListMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListMetastorePartitionsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.Builder.class); + } + + private int responseCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object response_; + + public enum ResponseCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PARTITIONS(1), + STREAMS(2), + RESPONSE_NOT_SET(0); + private final int value; + + private ResponseCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ResponseCase valueOf(int value) { + return forNumber(value); + } + + public static ResponseCase forNumber(int value) { + switch (value) { + case 1: + return PARTITIONS; + case 2: + return STREAMS; + case 0: + return RESPONSE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + * + * @return Whether the partitions field is set. + */ + @java.lang.Override + public boolean hasPartitions() { + return responseCase_ == 1; + } + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + * + * @return The partitions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList getPartitions() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder + getPartitionsOrBuilder() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } + + public static final int STREAMS_FIELD_NUMBER = 2; + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + * + * @return Whether the streams field is set. + */ + @java.lang.Override + public boolean hasStreams() { + return responseCase_ == 2; + } + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + * + * @return The streams. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamList getStreams() { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1beta.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } + + /** + * + * + *
+   * The list of streams.
+   * 
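Because `partitions` and `streams` live in the `response` oneof, exactly one of them is set on any given message, and callers should check the case before reading either field. A minimal sketch, assuming `response` is a `ListMetastorePartitionsResponse` and the two handler methods are hypothetical:

    switch (response.getResponseCase()) {
      case PARTITIONS:
        handlePartitions(response.getPartitions()); // MetastorePartitionList
        break;
      case STREAMS:
        handleStreams(response.getStreams()); // StreamList
        break;
      case RESPONSE_NOT_SET:
        break;
    }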
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder getStreamsOrBuilder() { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1beta.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (responseCase_ == 1) { + output.writeMessage( + 1, (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_); + } + if (responseCase_ == 2) { + output.writeMessage(2, (com.google.cloud.bigquery.storage.v1beta.StreamList) response_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (responseCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_); + } + if (responseCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.cloud.bigquery.storage.v1beta.StreamList) response_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) obj; + + if (!getResponseCase().equals(other.getResponseCase())) return false; + switch (responseCase_) { + case 1: + if (!getPartitions().equals(other.getPartitions())) return false; + break; + case 2: + if (!getStreams().equals(other.getStreams())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (responseCase_) { + case 1: + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitions().hashCode(); + break; + case 2: + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreams().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for ListMetastorePartitions.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ != null) { + partitionsBuilder_.clear(); + } + if (streamsBuilder_ != null) { + streamsBuilder_.clear(); + } + responseCase_ = 0; + response_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse build() { + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse result) { + result.responseCase_ = responseCase_; + result.response_ = this.response_; + if (responseCase_ == 1 && partitionsBuilder_ != null) { + result.response_ = partitionsBuilder_.build(); + } + if (responseCase_ == 2 && streamsBuilder_ != null) { + result.response_ = 
streamsBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + .getDefaultInstance()) return this; + switch (other.getResponseCase()) { + case PARTITIONS: + { + mergePartitions(other.getPartitions()); + break; + } + case STREAMS: + { + mergeStreams(other.getStreams()); + break; + } + case RESPONSE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getPartitionsFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage(getStreamsFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 2; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public Builder clearResponse() { + responseCase_ = 0; + response_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder, + 
com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + * + * @return Whether the partitions field is set. + */ + @java.lang.Override + public boolean hasPartitions() { + return responseCase_ == 1; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + * + * @return The partitions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList getPartitions() { + if (partitionsBuilder_ == null) { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } else { + if (responseCase_ == 1) { + return partitionsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + public Builder setPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + partitionsBuilder_.setMessage(value); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + public Builder setPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder builderForValue) { + if (partitionsBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + partitionsBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + public Builder mergePartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList value) { + if (partitionsBuilder_ == null) { + if (responseCase_ == 1 + && response_ + != com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList + .getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.newBuilder( + (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 1) { + partitionsBuilder_.mergeFrom(value); + } else { + partitionsBuilder_.setMessage(value); + } + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + } + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder + getPartitionsBuilder() { + return getPartitionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder + getPartitionsOrBuilder() { + if ((responseCase_ == 1) && (partitionsBuilder_ != null)) { + return partitionsBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of partitions.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + if (!(responseCase_ == 1)) { + response_ = + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } + partitionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 1; + onChanged(); + return partitionsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.StreamList, + com.google.cloud.bigquery.storage.v1beta.StreamList.Builder, + com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder> + streamsBuilder_; + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + * + * @return Whether the streams field is set. + */ + @java.lang.Override + public boolean hasStreams() { + return responseCase_ == 2; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + * + * @return The streams. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamList getStreams() { + if (streamsBuilder_ == null) { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1beta.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } else { + if (responseCase_ == 2) { + return streamsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + public Builder setStreams(com.google.cloud.bigquery.storage.v1beta.StreamList value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + streamsBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + public Builder setStreams( + com.google.cloud.bigquery.storage.v1beta.StreamList.Builder builderForValue) { + if (streamsBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + streamsBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + public Builder mergeStreams(com.google.cloud.bigquery.storage.v1beta.StreamList value) { + if (streamsBuilder_ == null) { + if (responseCase_ == 2 + && response_ + != com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1beta.StreamList.newBuilder( + (com.google.cloud.bigquery.storage.v1beta.StreamList) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 2) { + streamsBuilder_.mergeFrom(value); + } else { + streamsBuilder_.setMessage(value); + } + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + } + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + public com.google.cloud.bigquery.storage.v1beta.StreamList.Builder getStreamsBuilder() { + return getStreamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder getStreamsOrBuilder() { + if ((responseCase_ == 2) && (streamsBuilder_ != null)) { + return streamsBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 2) { + return (com.google.cloud.bigquery.storage.v1beta.StreamList) response_; + } + return com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } + } + + /** + * + * + *
+     * The list of streams.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.StreamList, + com.google.cloud.bigquery.storage.v1beta.StreamList.Builder, + com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + if (!(responseCase_ == 2)) { + response_ = com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } + streamsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.StreamList, + com.google.cloud.bigquery.storage.v1beta.StreamList.Builder, + com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta.StreamList) response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 2; + onChanged(); + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java new file mode 100644 
index 000000000000..5ae8a0a9ef9a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,103 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface ListMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + * + * @return Whether the partitions field is set. + */ + boolean hasPartitions(); + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + * + * @return The partitions. + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList getPartitions(); + + /** + * + * + *
+   * The list of partitions.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.MetastorePartitionList partitions = 1; + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder getPartitionsOrBuilder(); + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + * + * @return Whether the streams field is set. + */ + boolean hasStreams(); + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + * + * @return The streams. + */ + com.google.cloud.bigquery.storage.v1beta.StreamList getStreams(); + + /** + * + * + *
+   * The list of streams.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta.StreamList streams = 2; + */ + com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder getStreamsOrBuilder(); + + com.google.cloud.bigquery.storage.v1beta.ListMetastorePartitionsResponse.ResponseCase + getResponseCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java new file mode 100644 index 000000000000..88813249d250 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java @@ -0,0 +1,2344 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Information about a Hive partition.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.MetastorePartition} + */ +public final class MetastorePartition extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.MetastorePartition) + MetastorePartitionOrBuilder { + private static final long serialVersionUID = 0L; + + // Use MetastorePartition.newBuilder() to construct. + private MetastorePartition(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetastorePartition() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + fields_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MetastorePartition(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.class, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder.class); + } + + private int bitField0_; + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
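For instance, a partition for a table with two hypothetical partition keys, `year` and `month`, would carry one value per key in key order. A sketch, assuming the standard protobuf Java builder accessors for a repeated string field (`addValues`):

    MetastorePartition partition =
        MetastorePartition.newBuilder()
            .addValues("2024") // value for the first partition key (year)
            .addValues("09")   // value for the second partition key (month)
            .build();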
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int STORAGE_DESCRIPTOR_FIELD_NUMBER = 3; + private com.google.cloud.bigquery.storage.v1beta.StorageDescriptor storageDescriptor_; + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
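+   * Because the field is optional, a hasStorageDescriptor() check before the
+   * getter distinguishes "unset" from the default instance; for example:
+   *
+   *     if (partition.hasStorageDescriptor()) {
+   *       StorageDescriptor descriptor = partition.getStorageDescriptor();
+   *       // inspect the physical-storage metadata here
+   *     }
+   *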
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + @java.lang.Override + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor getStorageDescriptor() { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + + public static final int PARAMETERS_FIELD_NUMBER = 4; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
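+   * A short sketch of writing and reading this map with the generated
+   * accessors (the key and value strings are invented for the example):
+   *
+   *     MetastorePartition partition =
+   *         MetastorePartition.newBuilder()
+   *             .putParameters("numRows", "42")
+   *             .build();
+   *     String numRows = partition.getParametersOrDefault("numRows", "0");
+   *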
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int FIELDS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private java.util.List fields_; + + /** + * + * + *
+   * Optional. List of columns.
+   * 
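+   * A sketch of adding a column, assuming FieldSchema exposes a name setter
+   * (its accessors live in FieldSchema.java, which is not shown here):
+   *
+   *     MetastorePartition partition =
+   *         MetastorePartition.newBuilder()
+   *             .addFields(FieldSchema.newBuilder().setName("region").build())
+   *             .build();
+   *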
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getFieldsList() { + return fields_; + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.FieldSchema getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, values_.getRaw(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getStorageDescriptor()); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 4); + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(5, fields_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStorageDescriptor()); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, parameters__); + } + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, fields_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.MetastorePartition)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.MetastorePartition other = + (com.google.cloud.bigquery.storage.v1beta.MetastorePartition) obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasStorageDescriptor() != other.hasStorageDescriptor()) return false; + if (hasStorageDescriptor()) { + if (!getStorageDescriptor().equals(other.getStorageDescriptor())) return false; + } + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public 
int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasStorageDescriptor()) { + hash = (37 * hash) + STORAGE_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getStorageDescriptor().hashCode(); + } + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a Hive partition.
+   * 
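+   * A representative sketch of this builder end to end (all field values are
+   * invented for the example; the storage descriptor is omitted):
+   *
+   *     MetastorePartition partition =
+   *         MetastorePartition.newBuilder()
+   *             .addValues("2024-01-01")
+   *             .putParameters("owner", "analytics")
+   *             .build();
+   *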
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.MetastorePartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.MetastorePartition) + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.class, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.MetastorePartition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCreateTimeFieldBuilder(); + getStorageDescriptorFieldBuilder(); + getFieldsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + internalGetMutableParameters().clear(); + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition build() { + com.google.cloud.bigquery.storage.v1beta.MetastorePartition 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition buildPartial() { + com.google.cloud.bigquery.storage.v1beta.MetastorePartition result = + new com.google.cloud.bigquery.storage.v1beta.MetastorePartition(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta.MetastorePartition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.storageDescriptor_ = + storageDescriptorBuilder_ == null + ? storageDescriptor_ + : storageDescriptorBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.MetastorePartition) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.MetastorePartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta.MetastorePartition other) { + if (other == com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()) + return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasStorageDescriptor()) { + 
mergeStorageDescriptor(other.getStorageDescriptor()); + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000008; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000010); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + case 18: + { + input.readMessage(getCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + getStorageDescriptorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + com.google.cloud.bigquery.storage.v1beta.FieldSchema m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.FieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Represents the values of the partition keys, where each value
+     * corresponds to a specific partition key in the order in which the keys are
+     * defined. Each value is limited to 1024 characters.
+     * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. The creation time of the partition.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta.StorageDescriptor storageDescriptor_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder> + storageDescriptorBuilder_; + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + public boolean hasStorageDescriptor() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor getStorageDescriptor() { + if (storageDescriptorBuilder_ == null) { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } else { + return storageDescriptorBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageDescriptor_ = value; + } else { + storageDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setStorageDescriptor( + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder builderForValue) { + if (storageDescriptorBuilder_ == null) { + storageDescriptor_ = builderForValue.build(); + } else { + storageDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeStorageDescriptor( + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor value) { + if (storageDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && storageDescriptor_ != null + && storageDescriptor_ + != com.google.cloud.bigquery.storage.v1beta.StorageDescriptor + .getDefaultInstance()) { + getStorageDescriptorBuilder().mergeFrom(value); + } else { + storageDescriptor_ = value; + } + } else { + storageDescriptorBuilder_.mergeFrom(value); + } + if (storageDescriptor_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearStorageDescriptor() { + bitField0_ = (bitField0_ & ~0x00000004); + storageDescriptor_ = null; + if (storageDescriptorBuilder_ != null) { + storageDescriptorBuilder_.dispose(); + storageDescriptorBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder + getStorageDescriptorBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getStorageDescriptorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder() { + if (storageDescriptorBuilder_ != null) { + return storageDescriptorBuilder_.getMessageOrBuilder(); + } else { + return storageDescriptor_ == null + ? com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.getDefaultInstance() + : storageDescriptor_; + } + } + + /** + * + * + *
+     * Optional. Contains information about the physical storage of the data in
+     * the partition.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder> + getStorageDescriptorFieldBuilder() { + if (storageDescriptorBuilder_ == null) { + storageDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder>( + getStorageDescriptor(), getParentForChildren(), isClean()); + storageDescriptor_ = null; + } + return storageDescriptorBuilder_; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000008; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000008); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000008; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
+     * Optional. Additional parameters or metadata associated with the partition.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000008; + return this; + } + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + fields_ = + new java.util.ArrayList(fields_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.FieldSchema, + com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.FieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1beta.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1beta.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1beta.FieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllFields( + java.lang.Iterable values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1beta.FieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. List of columns.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.FieldSchema, + com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.FieldSchema, + com.google.cloud.bigquery.storage.v1beta.FieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000010) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.MetastorePartition) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.MetastorePartition) + private static final com.google.cloud.bigquery.storage.v1beta.MetastorePartition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.MetastorePartition(); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MetastorePartition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java new file mode 100644 index 000000000000..2ff6ba3def94 --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java @@ -0,0 +1,1036 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * List of metastore partitions.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.MetastorePartitionList} + */ +public final class MetastorePartitionList extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.MetastorePartitionList) + MetastorePartitionListOrBuilder { + private static final long serialVersionUID = 0L; + + // Use MetastorePartitionList.newBuilder() to construct. + private MetastorePartitionList(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetastorePartitionList() { + partitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MetastorePartitionList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.class, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder.class); + } + + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getPartitionsList() { + return partitions_; + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList other = + (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * List of metastore partitions.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.MetastorePartitionList} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.MetastorePartitionList) + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.class, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList build() { + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList buildPartial() { + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList result = + new com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder 
setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList.getDefaultInstance()) + return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.parser(), + extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + partitions_ = java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = + new java.util.ArrayList( + partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setPartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addPartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllPartitions( + java.lang.Iterable + values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder getPartitionsBuilder( + int index) { + return getPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + addPartitionsBuilder() { + return getPartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder addPartitionsBuilder( + int index) { + return getPartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Required. List of partitions.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getPartitionsBuilderList() { + return getPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.MetastorePartitionList) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.MetastorePartitionList) + private static final com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList(); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MetastorePartitionList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionList + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java new file mode 100644 index 
000000000000..f05565bf42ca --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java @@ -0,0 +1,93 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface MetastorePartitionListOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.MetastorePartitionList) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getPartitionsList(); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartition getPartitions(int index); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getPartitionsCount(); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getPartitionsOrBuilderList(); + + /** + * + * + *
+   * Required. List of partitions.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition partitions = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder getPartitionsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java new file mode 100644 index 000000000000..e3d1ae03c0fb --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java @@ -0,0 +1,314 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface MetastorePartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.MetastorePartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
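+   * (For illustration only: a table partitioned by year and month might
+   * carry the values ["2024", "05"], in that key order; the values shown
+   * here are hypothetical.)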
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+   * Required. Represents the values of the partition keys, where each value
+   * corresponds to a specific partition key in the order in which the keys are
+   * defined. Each value is limited to 1024 characters.
+   * 
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. The creation time of the partition.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the storageDescriptor field is set. + */ + boolean hasStorageDescriptor(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The storageDescriptor. + */ + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor getStorageDescriptor(); + + /** + * + * + *
+   * Optional. Contains information about the physical storage of the data in
+   * the partition.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.StorageDescriptor storage_descriptor = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder + getStorageDescriptorOrBuilder(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
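+   * (For illustration only: a single entry such as "owner" -> "etl-pipeline"
+   * fits this map; the key and value shown here are hypothetical.)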
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Additional parameters or metadata associated with the partition.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getFieldsList(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta.FieldSchema getFields(int index); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
+   * Optional. List of columns.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.FieldSchema fields = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta.FieldSchemaOrBuilder getFieldsOrBuilder(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java new file mode 100644 index 000000000000..c115d6b1c249 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java @@ -0,0 +1,236 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public final class MetastorePartitionProto { + private MetastorePartitionProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_ParametersEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_StreamList_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_StreamList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "4google/cloud/bigquery/storage/v1beta/partition.proto\022$google.cloud.bigquery.st" + + "orage.v1beta\032\037google/api/field_behavior." 
+ + "proto\032\031google/api/resource.proto\032\037google/protobuf/timestamp.proto\"3\n" + + "\013FieldSchema\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022\021\n" + + "\004type\030\002 \001(\tB\003\340A\002\"\257\001\n" + + "\021StorageDescriptor\022\031\n" + + "\014location_uri\030\001 \001(\tB\003\340A\001\022\031\n" + + "\014input_format\030\002 \001(\tB\003\340A\001\022\032\n\r" + + "output_format\030\003 \001(\tB\003\340A\001\022H\n\n" + + "serde_info\030\004 \001" + + "(\0132/.google.cloud.bigquery.storage.v1beta.SerDeInfoB\003\340A\001\"\317\001\n" + + "\tSerDeInfo\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\001\022\"\n" + + "\025serialization_library\030\002 \001(\tB\003\340A\002\022X\n\n" + + "parameters\030\003 \003(\0132?.google.cloud" + + ".bigquery.storage.v1beta.SerDeInfo.ParametersEntryB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"\227\003\n" + + "\022MetastorePartition\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002\0224\n" + + "\013create_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022X\n" + + "\022storage_descriptor\030\003 \001(\01327.googl" + + "e.cloud.bigquery.storage.v1beta.StorageDescriptorB\003\340A\001\022a\n\n" + + "parameters\030\004 \003(\0132H.goo" + + "gle.cloud.bigquery.storage.v1beta.MetastorePartition.ParametersEntryB\003\340A\001\022F\n" + + "\006fields\030\005" + + " \003(\01321.google.cloud.bigquery.storage.v1beta.FieldSchemaB\003\340A\001\0321\n" + + "\017ParametersEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"k\n" + + "\026MetastorePartitionList\022Q\n\n" + + "partitions\030\001 \003" + + "(\01328.google.cloud.bigquery.storage.v1beta.MetastorePartitionB\003\340A\002\"\272\001\n\n" + + "ReadStream\022\024\n" + + "\004name\030\001 \001(\tB\006\340A\003\340A\010:\225\001\352A\221\001\n" + + ")bigquerystorage.googleapis.com/ReadStream\022Kprojec" + + "ts/{project}/locations/{location}/sessio" + + "ns/{session}/streams/{stream}*\013readStreams2\n" + + "readStream\"T\n\n" + + "StreamList\022F\n" + + "\007streams\030\001" + + " \003(\01320.google.cloud.bigquery.storage.v1beta.ReadStreamB\003\340A\003\"/\n" + + "\030MetastorePartitionValues\022\023\n" + + "\006values\030\001 \003(\tB\003\340A\002B\327\001\n" + + "(com.google.cloud.bigquery.storage.v1betaB\027Meta" + + "storePartitionProtoP\001ZBcloud.google.com/go/bigquery/storage/apiv1beta/storagepb;" + + "storagepb\252\002$Google.Cloud.BigQuery.Storag" + + "e.V1Beta\312\002$Google\\Cloud\\BigQuery\\Storage\\V1betab\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_FieldSchema_descriptor, + new java.lang.String[] { + "Name", "Type", + }); + internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_descriptor = + getDescriptor().getMessageTypes().get(1); + 
internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_descriptor, + new java.lang.String[] { + "LocationUri", "InputFormat", "OutputFormat", "SerdeInfo", + }); + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor, + new java.lang.String[] { + "Name", "SerializationLibrary", "Parameters", + }); + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_ParametersEntry_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor, + new java.lang.String[] { + "Values", "CreateTime", "StorageDescriptor", "Parameters", "Fields", + }); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_ParametersEntry_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_ParametersEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartition_ParametersEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionList_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1beta_StreamList_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1beta_StreamList_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_StreamList_descriptor, + new java.lang.String[] { + "Streams", + }); + 
internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_descriptor, + new java.lang.String[] { + "Values", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java new file mode 100644 index 000000000000..042afd2029d5 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java @@ -0,0 +1,315 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
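+// A minimal usage sketch (illustrative only, not part of the generated API
+// surface): constructing a MetastorePartition and wrapping it in a
+// MetastorePartitionList with the builders generated above. The partition
+// key values "2024" and "05" are hypothetical placeholders.
+//
+//   MetastorePartition partition =
+//       MetastorePartition.newBuilder()
+//           .addValues("2024")   // first partition key, e.g. year
+//           .addValues("05")     // second partition key, e.g. month
+//           .build();
+//   MetastorePartitionList partitions =
+//       MetastorePartitionList.newBuilder()
+//           .addPartitions(partition)
+//           .build();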
+// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public final class MetastorePartitionServiceProto { + private MetastorePartitionServiceProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + ">google/cloud/bigquery/storage/v1beta/metastore_partition.proto\022$google.cloud.b" + + "igquery.storage.v1beta\032\034google/api/annot" + + "ations.proto\032\027google/api/client.proto\032\037g" + + "oogle/api/field_behavior.proto\032\031google/a" + + "pi/resource.proto\0324google/cloud/bigquery" + + "/storage/v1beta/partition.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\"\264\001\n" + + "\037CreateMetastorePartitionRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022Z\n" + + "\023metastore_partition\030\002 \001(\01328.google.cloud.bigquery.s" + + "torage.v1beta.MetastorePartitionB\003\340A\002\"\372\001\n" + + "%BatchCreateMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022\\\n" + + "\010requests\030\002 \003(\0132E.googl" + + "e.cloud.bigquery.storage.v1beta.CreateMetastorePartitionRequestB\003\340A\002\022%\n" + + "\030skip_existing_partitions\030\003 \001(\010B\003\340A\001\022\025\n" + + "\010trace_id\030\004 \001(\tB\003\340A\001\"v\n" + + "&BatchCreateMetastorePartitionsResponse\022L\n\n" + + "partitions\030\001 \003(\01328.googl" + + "e.cloud.bigquery.storage.v1beta.MetastorePartition\"\324\001\n" + + "%BatchDeleteMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022]\n" + + "\020partition_values\030\002 \003(\0132>.google.cloud.bigquery.st" + + "orage.v1beta.MetastorePartitionValuesB\003\340A\002\022\025\n" + + "\010trace_id\030\004 \001(\tB\003\340A\001\"\263\001\n" + + "\037UpdateMetastorePartitionRequest\022Z\n" + + "\023metastore_partition\030\001" + + " \001(\01328.google.cloud.bigquery.storage.v1beta.MetastorePartitionB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\001\"\323\001\n" + + "%BatchUpdateMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022\\\n" + + "\010requests\030\002 \003(\0132E.google.cloud.bigquery.storage.v1bet" + + 
"a.UpdateMetastorePartitionRequestB\003\340A\002\022\025\n" + + "\010trace_id\030\004 \001(\tB\003\340A\001\"v\n" + + "&BatchUpdateMetastorePartitionsResponse\022L\n\n" + + "partitions\030\001 " + + "\003(\01328.google.cloud.bigquery.storage.v1beta.MetastorePartition\"\203\001\n" + + "\036ListMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022\023\n" + + "\006filter\030\002 \001(\tB\003\340A\001\022\025\n" + + "\010trace_id\030\003 \001(\tB\003\340A\001\"\306\001\n" + + "\037ListMetastorePartitionsResponse\022R\n\n" + + "partitions\030\001" + + " \001(\0132<.google.cloud.bigquery.storage.v1beta.MetastorePartitionListH\000\022C\n" + + "\007streams\030\002" + + " \001(\01320.google.cloud.bigquery.storage.v1beta.StreamListH\000B\n\n" + + "\010response\"\335\001\n" + + " StreamMetastorePartitionsRequest\0225\n" + + "\006parent\030\001 \001(\tB%\340A\002\372A\037\n" + + "\035bigquery.googleapis.com/Table\022[\n" + + "\024metastore_partitions\030\002 \003(\01328" + + ".google.cloud.bigquery.storage.v1beta.MetastorePartitionB\003\340A\001\022%\n" + + "\030skip_existing_partitions\030\003 \001(\010B\003\340A\001\"u\n" + + "!StreamMetastorePartitionsResponse\022\'\n" + + "\037total_partitions_streamed_count\030\002 \001(\003\022\'\n" + + "\037total_partitions_inserted_count\030\003 \001(\003\"L\n" + + "\026BatchSizeTooLargeError\022\026\n" + + "\016max_batch_size\030\001 \001(\003\022\032\n\r" + + "error_message\030\002 \001(\tB\003\340A\0012\321\n\n" + + "\031MetastorePartitionService\022\216\002\n" + + "\036BatchCreateMetastorePartitions\022K.google.cloud.bigquery.storage.v1bet" + + "a.BatchCreateMetastorePartitionsRequest\032L.google.cloud.bigquery.storage.v1beta.B" + + "atchCreateMetastorePartitionsResponse\"Q\202" + + "\323\344\223\002K\"F/v1beta/{parent=projects/*/datase" + + "ts/*/tables/*}/partitions:batchCreate:\001*\022\330\001\n" + + "\036BatchDeleteMetastorePartitions\022K.google.cloud.bigquery.storage.v1beta.Batch" + + "DeleteMetastorePartitionsRequest\032\026.googl" + + "e.protobuf.Empty\"Q\202\323\344\223\002K\"F/v1beta/{paren" + + "t=projects/*/datasets/*/tables/*}/partitions:batchDelete:\001*\022\216\002\n" + + "\036BatchUpdateMetastorePartitions\022K.google.cloud.bigquery.s" + + "torage.v1beta.BatchUpdateMetastorePartitionsRequest\032L.google.cloud.bigquery.stor" + + "age.v1beta.BatchUpdateMetastorePartition" + + "sResponse\"Q\202\323\344\223\002K\"F/v1beta/{parent=proje" + + "cts/*/datasets/*/tables/*}/partitions:batchUpdate:\001*\022\204\002\n" + + "\027ListMetastorePartitions\022D.google.cloud.bigquery.storage.v1beta." 
+ + "ListMetastorePartitionsRequest\032E.google.cloud.bigquery.storage.v1beta.ListMetast" + + "orePartitionsResponse\"\\\332A\006parent\202\323\344\223\002M\022K" + + "/v1beta/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:list\022\262\001\n" + + "\031StreamMetastorePartitions\022F.google.cloud" + + ".bigquery.storage.v1beta.StreamMetastorePartitionsRequest\032G.google.cloud.bigquer" + + "y.storage.v1beta.StreamMetastorePartitio" + + "nsResponse\"\000(\0010\001\032{\312A\036bigquerystorage.goo" + + "gleapis.com\322AWhttps://www.googleapis.com" + + "/auth/bigquery,https://www.googleapis.com/auth/cloud-platformB\266\002\n" + + "(com.google.cloud.bigquery.storage.v1betaB\036MetastorePar" + + "titionServiceProtoP\001ZBcloud.google.com/go/bigquery/storage/apiv1beta/storagepb;s" + + "toragepb\252\002$Google.Cloud.BigQuery.Storage" + + ".V1Beta\312\002$Google\\Cloud\\BigQuery\\Storage\\V1beta\352AU\n" + + "\035bigquery.googleapis.com/Table\0224projects/{project}/datasets/{dataset}/" + + "tables/{table}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_CreateMetastorePartitionRequest_descriptor, + new java.lang.String[] { + "Parent", "MetastorePartition", + }); + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Requests", "SkipExistingPartitions", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_BatchCreateMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_BatchDeleteMetastorePartitionsRequest_descriptor, + new 
java.lang.String[] { + "Parent", "PartitionValues", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_descriptor, + new java.lang.String[] { + "MetastorePartition", "UpdateMask", + }); + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Requests", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_BatchUpdateMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", + }); + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "TraceId", + }); + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_ListMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "Partitions", "Streams", "Response", + }); + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "MetastorePartitions", "SkipExistingPartitions", + }); + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_descriptor, + new java.lang.String[] { + "TotalPartitionsStreamedCount", "TotalPartitionsInsertedCount", + }); + 
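+    // Each of these assignments pairs a message descriptor, looked up by its
+    // zero-based position in metastore_partition.proto's declaration order,
+    // with the FieldAccessorTable used by the reflection-based accessors; the
+    // get(11) below is the twelfth and last message, BatchSizeTooLargeError.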
internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta_BatchSizeTooLargeError_descriptor, + new java.lang.String[] { + "MaxBatchSize", "ErrorMessage", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java new file mode 100644 index 000000000000..75cde009989d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java @@ -0,0 +1,758 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * <pre>
+ * Represents the values of a metastore partition.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.MetastorePartitionValues} + */ +public final class MetastorePartitionValues extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) + MetastorePartitionValuesOrBuilder { + private static final long serialVersionUID = 0L; + + // Use MetastorePartitionValues.newBuilder() to construct. + private MetastorePartitionValues(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetastorePartitionValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MetastorePartitionValues(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.class, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder.class); + } + + public static final int VALUES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, values_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues other = + (com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) obj; + + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * <pre>
+   * Represents the values of a metastore partition.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.MetastorePartitionValues} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValuesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.class, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_MetastorePartitionValues_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues build() { + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues buildPartial() { + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues result = + new com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + values_.makeImmutable(); + result.values_ = values_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + 
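+    // Usage sketch (illustrative, not generated output): `values` is positional,
+    // one entry per partition key in key-definition order, so a table
+    // partitioned by hypothetical keys (date, region) might be addressed as:
+    //   MetastorePartitionValues.newBuilder()
+    //       .addValues("2024-05-01")   // first key: date
+    //       .addValues("US")           // second key: region
+    //       .build();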
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues.getDefaultInstance()) + return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ |= 0x00000001; + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureValuesIsMutable(); + values_.add(s); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList values_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureValuesIsMutable() { + if (!values_.isModifiable()) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + values_.makeImmutable(); + return values_; + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Required. The values of the partition keys, where each value corresponds to
+     * a specific partition key in the order in which the keys are defined.
+     * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) + private static final com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues(); + } + + public static com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MetastorePartitionValues parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionValues + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java new file mode 100644 index 000000000000..a81b26c4b0c7 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface MetastorePartitionValuesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.MetastorePartitionValues) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of values. + */ + int getValuesCount(); + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + + /** + * + * + *
+   * <pre>
+   * Required. The values of the partition keys, where each value corresponds to
+   * a specific partition key in the order in which the keys are defined.
+   * </pre>
+ * + * repeated string values = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java new file mode 100644 index 000000000000..16764bcbbb75 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java @@ -0,0 +1,655 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * <pre>
+ * Information about a single stream that is used to read partitions.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.ReadStream} + */ +public final class ReadStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.ReadStream) + ReadStreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadStream.newBuilder() to construct. + private ReadStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadStream() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadStream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.ReadStream.class, + com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * <pre>
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * <pre>
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.ReadStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.ReadStream other = + (com.google.cloud.bigquery.storage.v1beta.ReadStream) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); 
+ } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta.ReadStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * <pre>
+   * Information about a single stream that is used to read partitions.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.ReadStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.ReadStream) + com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.ReadStream.class, + com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.ReadStream.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_ReadStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ReadStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.ReadStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ReadStream build() { + com.google.cloud.bigquery.storage.v1beta.ReadStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ReadStream buildPartial() { + com.google.cloud.bigquery.storage.v1beta.ReadStream result = + new com.google.cloud.bigquery.storage.v1beta.ReadStream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta.ReadStream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) 
{ + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.ReadStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.ReadStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta.ReadStream other) { + if (other == com.google.cloud.bigquery.storage.v1beta.ReadStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
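+     * For illustration only, a concrete name of this form might look like
+     * `projects/example-project/locations/us/sessions/4a5e2f/streams/stream-001`;
+     * every segment value in that example is a hypothetical placeholder.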
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Identifier. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.ReadStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.ReadStream) + private static final com.google.cloud.bigquery.storage.v1beta.ReadStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.ReadStream(); + } + + public static com.google.cloud.bigquery.storage.v1beta.ReadStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ReadStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java new file mode 100644 index 000000000000..7bd58931f04c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface ReadStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.ReadStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Identifier. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.field_behavior) = IDENTIFIER]; + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java new file mode 100644 index 000000000000..3b4d9550457e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java @@ -0,0 +1,1236 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Serializer and deserializer information.
+ * 
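+ *
+ * <p>A minimal round-trip sketch; the serialization library class named below
+ * is only an illustrative value, not a requirement of this API:
+ *
+ * <pre>{@code
+ * SerDeInfo info =
+ *     SerDeInfo.newBuilder()
+ *         .setSerializationLibrary(
+ *             "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
+ *         .build();
+ * byte[] wire = info.toByteArray();
+ * SerDeInfo parsed = SerDeInfo.parseFrom(wire);
+ * }</pre>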
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.SerDeInfo} + */ +public final class SerDeInfo extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.SerDeInfo) + SerDeInfoOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SerDeInfo.newBuilder() to construct. + private SerDeInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SerDeInfo() { + name_ = ""; + serializationLibrary_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SerDeInfo(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.class, + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERIALIZATION_LIBRARY_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object serializationLibrary_ = ""; + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLibrary. + */ + @java.lang.Override + public java.lang.String getSerializationLibrary() { + java.lang.Object ref = serializationLibrary_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializationLibrary_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLibrary. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializationLibraryBytes() { + java.lang.Object ref = serializationLibrary_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializationLibrary_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMETERS_FIELD_NUMBER = 3; + + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
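+   * For example, with a hypothetical key,
+   * {@code getParametersOrDefault("field.delim", ",")} returns the stored value
+   * when the key is present and the supplied default {@code ","} otherwise.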
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(serializationLibrary_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, serializationLibrary_); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetParameters(), ParametersDefaultEntryHolder.defaultEntry, 3); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(serializationLibrary_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, serializationLibrary_); + } + for (java.util.Map.Entry entry : + internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry parameters__ = + ParametersDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, parameters__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.SerDeInfo)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.SerDeInfo other = + (com.google.cloud.bigquery.storage.v1beta.SerDeInfo) obj; + + if (!getName().equals(other.getName())) return false; + if (!getSerializationLibrary().equals(other.getSerializationLibrary())) return false; + if (!internalGetParameters().equals(other.internalGetParameters())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SERIALIZATION_LIBRARY_FIELD_NUMBER; + hash = (53 * hash) + getSerializationLibrary().hashCode(); + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = 
hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta.SerDeInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Serializer and deserializer information.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.SerDeInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.SerDeInfo) + com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetMutableParameters(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.class, + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.SerDeInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + serializationLibrary_ = ""; + internalGetMutableParameters().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_SerDeInfo_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo build() { + com.google.cloud.bigquery.storage.v1beta.SerDeInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo buildPartial() { + com.google.cloud.bigquery.storage.v1beta.SerDeInfo result = + new com.google.cloud.bigquery.storage.v1beta.SerDeInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta.SerDeInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.serializationLibrary_ = serializationLibrary_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + } + } 
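+
+    // A minimal usage sketch of this builder; the SerDe name, serialization
+    // library class, and parameter pair are hypothetical illustrative values
+    // rather than anything mandated by the API.
+    private static com.google.cloud.bigquery.storage.v1beta.SerDeInfo exampleSerDeInfo() {
+      return com.google.cloud.bigquery.storage.v1beta.SerDeInfo.newBuilder()
+          .setName("lazy-simple")
+          .setSerializationLibrary("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
+          .putParameters("field.delim", ",")
+          .build();
+    }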
+ + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.SerDeInfo) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.SerDeInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta.SerDeInfo other) { + if (other == com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getSerializationLibrary().isEmpty()) { + serializationLibrary_ = other.serializationLibrary_; + bitField0_ |= 0x00000002; + onChanged(); + } + internalGetMutableParameters().mergeFrom(other.internalGetParameters()); + bitField0_ |= 0x00000004; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + serializationLibrary_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.protobuf.MapEntry parameters__ = + input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParameters() + .getMutableMap() + .put(parameters__.getKey(), parameters__.getValue()); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Name of the SerDe.
+     * The maximum length is 256 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object serializationLibrary_ = ""; + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLibrary. + */ + public java.lang.String getSerializationLibrary() { + java.lang.Object ref = serializationLibrary_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serializationLibrary_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLibrary. + */ + public com.google.protobuf.ByteString getSerializationLibraryBytes() { + java.lang.Object ref = serializationLibrary_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serializationLibrary_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The serializationLibrary to set. + * @return This builder for chaining. + */ + public Builder setSerializationLibrary(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + serializationLibrary_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSerializationLibrary() { + serializationLibrary_ = getDefaultInstance().getSerializationLibrary(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Specifies a fully-qualified class name of the serialization
+     * library that is responsible for the translation of data between table
+     * representation and the underlying low-level input and output format
+     * structures. The maximum length is 256 characters.
+     * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for serializationLibrary to set. + * @return This builder for chaining. + */ + public Builder setSerializationLibraryBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + serializationLibrary_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.MapField parameters_; + + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + private com.google.protobuf.MapField + internalGetMutableParameters() { + if (parameters_ == null) { + parameters_ = + com.google.protobuf.MapField.newMapField(ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + bitField0_ |= 0x00000004; + onChanged(); + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParameters().getMap().containsKey(key); + } + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.lang.String getParametersOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + bitField0_ = (bitField0_ & ~0x00000004); + internalGetMutableParameters().getMutableMap().clear(); + return this; + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParameters(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParameters().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParameters() { + bitField0_ |= 0x00000004; + return internalGetMutableParameters().getMutableMap(); + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParameters(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParameters().getMutableMap().put(key, value); + bitField0_ |= 0x00000004; + return this; + } + + /** + * + * + *
+     * Optional. Key-value pairs that define the initialization parameters for the
+     * serialization library.
+     * Maximum size 10 KiB.
+     * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParameters(java.util.Map values) { + internalGetMutableParameters().getMutableMap().putAll(values); + bitField0_ |= 0x00000004; + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.SerDeInfo) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.SerDeInfo) + private static final com.google.cloud.bigquery.storage.v1beta.SerDeInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.SerDeInfo(); + } + + public static com.google.cloud.bigquery.storage.v1beta.SerDeInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SerDeInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java new file mode 100644 index 000000000000..dfc9ffa34f2e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface SerDeInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.SerDeInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Optional. Name of the SerDe.
+   * The maximum length is 256 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The serializationLibrary. + */ + java.lang.String getSerializationLibrary(); + + /** + * + * + *
+   * Required. Specifies a fully-qualified class name of the serialization
+   * library that is responsible for the translation of data between table
+   * representation and the underlying low-level input and output format
+   * structures. The maximum length is 256 characters.
+   * 
+ * + * string serialization_library = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for serializationLibrary. + */ + com.google.protobuf.ByteString getSerializationLibraryBytes(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParametersCount(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParameters(java.lang.String key); + + /** Use {@link #getParametersMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParameters(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParametersMap(); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + java.lang.String getParametersOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
+   * Optional. Key-value pairs that define the initialization parameters for the
+   * serialization library.
+   * Maximum size 10 KiB.
+   * 
+ * + * map<string, string> parameters = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getParametersOrThrow(java.lang.String key); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java new file mode 100644 index 000000000000..ed4a0b30b446 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java @@ -0,0 +1,1374 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Contains information about the physical storage of the data in the metastore
+ * partition.
+ * 
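+ *
+ * <p>A minimal construction sketch; the URI and format class names are
+ * hypothetical placeholders, and the standard generated setters are assumed:
+ *
+ * <pre>{@code
+ * StorageDescriptor descriptor =
+ *     StorageDescriptor.newBuilder()
+ *         .setLocationUri("gs://example-bucket/warehouse/table/")
+ *         .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+ *         .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+ *         .setSerdeInfo(SerDeInfo.getDefaultInstance())
+ *         .build();
+ * }</pre>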
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StorageDescriptor} + */ +public final class StorageDescriptor extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.StorageDescriptor) + StorageDescriptorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StorageDescriptor.newBuilder() to construct. + private StorageDescriptor(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StorageDescriptor() { + locationUri_ = ""; + inputFormat_ = ""; + outputFormat_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StorageDescriptor(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.class, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder.class); + } + + private int bitField0_; + public static final int LOCATION_URI_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object locationUri_ = ""; + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + @java.lang.Override + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INPUT_FORMAT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object inputFormat_ = ""; + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + @java.lang.Override + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + inputFormat_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OUTPUT_FORMAT_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object outputFormat_ = ""; + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + @java.lang.Override + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERDE_INFO_FIELD_NUMBER = 4; + private com.google.cloud.bigquery.storage.v1beta.SerDeInfo serdeInfo_; + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + @java.lang.Override + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo getSerdeInfo() { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(locationUri_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, locationUri_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(inputFormat_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, inputFormat_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(outputFormat_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, outputFormat_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getSerdeInfo()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(locationUri_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, locationUri_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(inputFormat_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, inputFormat_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(outputFormat_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, outputFormat_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getSerdeInfo()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.StorageDescriptor)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor other = + (com.google.cloud.bigquery.storage.v1beta.StorageDescriptor) obj; + + if (!getLocationUri().equals(other.getLocationUri())) return false; + if (!getInputFormat().equals(other.getInputFormat())) return false; + if (!getOutputFormat().equals(other.getOutputFormat())) return false; + if (hasSerdeInfo() != other.hasSerdeInfo()) return false; + if (hasSerdeInfo()) { + if (!getSerdeInfo().equals(other.getSerdeInfo())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOCATION_URI_FIELD_NUMBER; + hash = (53 * hash) + getLocationUri().hashCode(); + hash = (37 * hash) + INPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + getInputFormat().hashCode(); + hash = (37 * hash) + OUTPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + 
getOutputFormat().hashCode(); + if (hasSerdeInfo()) { + hash = (37 * hash) + SERDE_INFO_FIELD_NUMBER; + hash = (53 * hash) + getSerdeInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public 
Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains information about the physical storage of the data in the metastore
+   * partition.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StorageDescriptor} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.StorageDescriptor) + com.google.cloud.bigquery.storage.v1beta.StorageDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.class, + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSerdeInfoFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + locationUri_ = ""; + inputFormat_ = ""; + outputFormat_ = ""; + serdeInfo_ = null; + if (serdeInfoBuilder_ != null) { + serdeInfoBuilder_.dispose(); + serdeInfoBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StorageDescriptor_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor build() { + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor buildPartial() { + com.google.cloud.bigquery.storage.v1beta.StorageDescriptor result = + new com.google.cloud.bigquery.storage.v1beta.StorageDescriptor(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta.StorageDescriptor result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.locationUri_ = locationUri_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.inputFormat_ = inputFormat_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.outputFormat_ = outputFormat_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.serdeInfo_ = serdeInfoBuilder_ == null ? 
serdeInfo_ : serdeInfoBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.StorageDescriptor) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.StorageDescriptor) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta.StorageDescriptor other) { + if (other == com.google.cloud.bigquery.storage.v1beta.StorageDescriptor.getDefaultInstance()) + return this; + if (!other.getLocationUri().isEmpty()) { + locationUri_ = other.locationUri_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInputFormat().isEmpty()) { + inputFormat_ = other.inputFormat_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getOutputFormat().isEmpty()) { + outputFormat_ = other.outputFormat_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasSerdeInfo()) { + mergeSerdeInfo(other.getSerdeInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + locationUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + inputFormat_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + outputFormat_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(getSerdeInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object locationUri_ = ""; 
+ + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + public java.lang.String getLocationUri() { + java.lang.Object ref = locationUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + locationUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + public com.google.protobuf.ByteString getLocationUriBytes() { + java.lang.Object ref = locationUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + locationUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
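+     * <p>Setters return this builder, so calls chain; a sketch:
+     * <pre>{@code
+     * StorageDescriptor descriptor =
+     *     StorageDescriptor.newBuilder()
+     *         .setLocationUri("gs://spark-dataproc-data/pangea-data/case_sensitive/")
+     *         .build();
+     * }</pre>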
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + locationUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLocationUri() { + locationUri_ = getDefaultInstance().getLocationUri(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The physical location of the metastore partition
+     * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+     * `gs://spark-dataproc-data/pangea-data/*`).
+     * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for locationUri to set. + * @return This builder for chaining. + */ + public Builder setLocationUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + locationUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object inputFormat_ = ""; + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + inputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + public com.google.protobuf.ByteString getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
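+     * <p>Input and output formats are typically configured together; a sketch:
+     * <pre>{@code
+     * StorageDescriptor.Builder builder =
+     *     StorageDescriptor.newBuilder()
+     *         .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+     *         .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
+     * }</pre>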
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The inputFormat to set. + * @return This builder for chaining. + */ + public Builder setInputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + inputFormat_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearInputFormat() { + inputFormat_ = getDefaultInstance().getInputFormat(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the InputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for inputFormat to set. + * @return This builder for chaining. + */ + public Builder setInputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + inputFormat_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object outputFormat_ = ""; + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + outputFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOutputFormat() { + outputFormat_ = getDefaultInstance().getOutputFormat(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Specifies the fully qualified class name of the OutputFormat
+     * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+     * The maximum length is 128 characters.
+     * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + outputFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta.SerDeInfo serdeInfo_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.SerDeInfo, + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder, + com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder> + serdeInfoBuilder_; + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo getSerdeInfo() { + if (serdeInfoBuilder_ == null) { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } else { + return serdeInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
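+     * <p>A sketch; {@code SerDeInfo.newBuilder()} is assumed to be the standard
+     * generated factory, and SerDeInfo's own fields (not shown here) are set on
+     * that builder:
+     * <pre>{@code
+     * StorageDescriptor descriptor =
+     *     StorageDescriptor.newBuilder()
+     *         .setSerdeInfo(SerDeInfo.newBuilder().build()) // empty SerDeInfo for illustration
+     *         .build();
+     * }</pre>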
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSerdeInfo(com.google.cloud.bigquery.storage.v1beta.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serdeInfo_ = value; + } else { + serdeInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSerdeInfo( + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder builderForValue) { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = builderForValue.build(); + } else { + serdeInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSerdeInfo(com.google.cloud.bigquery.storage.v1beta.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && serdeInfo_ != null + && serdeInfo_ + != com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance()) { + getSerdeInfoBuilder().mergeFrom(value); + } else { + serdeInfo_ = value; + } + } else { + serdeInfoBuilder_.mergeFrom(value); + } + if (serdeInfo_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSerdeInfo() { + bitField0_ = (bitField0_ & ~0x00000008); + serdeInfo_ = null; + if (serdeInfoBuilder_ != null) { + serdeInfoBuilder_.dispose(); + serdeInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder getSerdeInfoBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getSerdeInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { + if (serdeInfoBuilder_ != null) { + return serdeInfoBuilder_.getMessageOrBuilder(); + } else { + return serdeInfo_ == null + ? com.google.cloud.bigquery.storage.v1beta.SerDeInfo.getDefaultInstance() + : serdeInfo_; + } + } + + /** + * + * + *
+     * Optional. Serializer and deserializer information.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.SerDeInfo, + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder, + com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder> + getSerdeInfoFieldBuilder() { + if (serdeInfoBuilder_ == null) { + serdeInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.SerDeInfo, + com.google.cloud.bigquery.storage.v1beta.SerDeInfo.Builder, + com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder>( + getSerdeInfo(), getParentForChildren(), isClean()); + serdeInfo_ = null; + } + return serdeInfoBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.StorageDescriptor) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.StorageDescriptor) + private static final com.google.cloud.bigquery.storage.v1beta.StorageDescriptor DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.StorageDescriptor(); + } + + public static com.google.cloud.bigquery.storage.v1beta.StorageDescriptor getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StorageDescriptor getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java new file mode 100644 index 000000000000..f76b8548d2a6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java @@ -0,0 +1,159 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface StorageDescriptorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.StorageDescriptor) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
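+   * <p>Both the message and its Builder implement this interface, so read-only
+   * code can accept either; a sketch:
+   * <pre>{@code
+   * StorageDescriptorOrBuilder sd = StorageDescriptor.newBuilder()
+   *     .setLocationUri("gs://spark-dataproc-data/pangea-data/case_sensitive/");
+   * String uri = sd.getLocationUri(); // reads through the common interface
+   * }</pre>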
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The locationUri. + */ + java.lang.String getLocationUri(); + + /** + * + * + *
+   * Optional. The physical location of the metastore partition
+   * (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+   * `gs://spark-dataproc-data/pangea-data/*`).
+   * 
+ * + * string location_uri = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for locationUri. + */ + com.google.protobuf.ByteString getLocationUriBytes(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The inputFormat. + */ + java.lang.String getInputFormat(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the InputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string input_format = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for inputFormat. + */ + com.google.protobuf.ByteString getInputFormatBytes(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + java.lang.String getOutputFormat(); + + /** + * + * + *
+   * Optional. Specifies the fully qualified class name of the OutputFormat
+   * (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+   * The maximum length is 128 characters.
+   * 
+ * + * string output_format = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + com.google.protobuf.ByteString getOutputFormatBytes(); + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the serdeInfo field is set. + */ + boolean hasSerdeInfo(); + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The serdeInfo. + */ + com.google.cloud.bigquery.storage.v1beta.SerDeInfo getSerdeInfo(); + + /** + * + * + *
+   * Optional. Serializer and deserializer information.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.SerDeInfo serde_info = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta.SerDeInfoOrBuilder getSerdeInfoOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java new file mode 100644 index 000000000000..c74861ff06fa --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java @@ -0,0 +1,1016 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * List of streams.
+ * 
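+ * <p>Like any generated message, a StreamList round-trips through its wire format;
+ * a sketch ({@code toByteArray()} is inherited from the protobuf runtime, and
+ * {@code parseFrom} throws InvalidProtocolBufferException on malformed input):
+ * <pre>{@code
+ * StreamList list = StreamList.newBuilder().build();
+ * byte[] bytes = list.toByteArray();
+ * StreamList parsed = StreamList.parseFrom(bytes);
+ * }</pre>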
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StreamList} + */ +public final class StreamList extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.StreamList) + StreamListOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamList.newBuilder() to construct. + private StreamList(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamList() { + streams_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StreamList.class, + com.google.cloud.bigquery.storage.v1beta.StreamList.Builder.class); + } + + public static final int STREAMS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List streams_; + + /** + * + * + *
+   * Output only. List of streams.
+   * 
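+   * <p>A read sketch, given a {@code StreamList list}:
+   * <pre>{@code
+   * for (ReadStream stream : list.getStreamsList()) {
+   *   // process each stream returned by the service
+   * }
+   * }</pre>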
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getStreamsList() { + return streams_; + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getStreamsOrBuilderList() { + return streams_; + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getStreamsCount() { + return streams_.size(); + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ReadStream getStreams(int index) { + return streams_.get(index); + } + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + return streams_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(1, streams_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, streams_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta.StreamList)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.StreamList other = + (com.google.cloud.bigquery.storage.v1beta.StreamList) obj; + + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta.StreamList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * List of streams.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StreamList} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.StreamList) + com.google.cloud.bigquery.storage.v1beta.StreamListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StreamList.class, + com.google.cloud.bigquery.storage.v1beta.StreamList.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta.StreamList.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + } else { + streams_ = null; + streamsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamList_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamList getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamList build() { + com.google.cloud.bigquery.storage.v1beta.StreamList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamList buildPartial() { + com.google.cloud.bigquery.storage.v1beta.StreamList result = + new com.google.cloud.bigquery.storage.v1beta.StreamList(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.StreamList result) { + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.streams_ = streams_; + } else { + result.streams_ = streamsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta.StreamList result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta.StreamList) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta.StreamList) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta.StreamList other) { + if (other == com.google.cloud.bigquery.storage.v1beta.StreamList.getDefaultInstance()) + return this; + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta.ReadStream m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.ReadStream.parser(), + extensionRegistry); + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(m); + } else { + streamsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + streams_ = + new java.util.ArrayList(streams_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.ReadStream, + com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder> + streamsBuilder_; + + /** + * 
+ * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.ReadStream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
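+     * <p>A sketch; {@code ReadStream.getDefaultInstance()} is assumed available
+     * (standard for generated messages) and stands in for a real stream value:
+     * <pre>{@code
+     * StreamList list =
+     *     StreamList.newBuilder()
+     *         .addStreams(ReadStream.getDefaultInstance())
+     *         .build();
+     * }</pre>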
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1beta.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllStreams( + java.lang.Iterable values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder getStreamsBuilder( + int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1beta.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder addStreamsBuilder( + int index) { + return getStreamsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1beta.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. List of streams.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.ReadStream, + com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.ReadStream, + com.google.cloud.bigquery.storage.v1beta.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder>( + streams_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.StreamList) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.StreamList) + private static final com.google.cloud.bigquery.storage.v1beta.StreamList DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta.StreamList(); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java new file mode 100644 index 000000000000..65310bfeea9b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java @@ -0,0 
+1,92 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface StreamListOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.StreamList) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getStreamsList(); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta.ReadStream getStreams(int index); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getStreamsCount(); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getStreamsOrBuilderList(); + + /** + * + * + *
+   * Output only. List of streams.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.ReadStream streams = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta.ReadStreamOrBuilder getStreamsOrBuilder(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java new file mode 100644 index 000000000000..0202706916dc --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java @@ -0,0 +1,1401 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * The top-level message sent by the client to the
+ * [Partitions.StreamMetastorePartitions][] method.
+ * It follows the default gRPC streaming maximum size of 4 MB.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest} + */ +public final class StreamMetastorePartitionsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) + StreamMetastorePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamMetastorePartitionsRequest.newBuilder() to construct. + private StreamMetastorePartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamMetastorePartitionsRequest() { + parent_ = ""; + metastorePartitions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamMetastorePartitionsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table where the partition is to be added, in the
+   * format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table where the partition is to be added, in the
+   * format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METASTORE_PARTITIONS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + metastorePartitions_; + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getMetastorePartitionsList() { + return metastorePartitions_; + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getMetastorePartitionsOrBuilderList() { + return metastorePartitions_; + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getMetastorePartitionsCount() { + return metastorePartitions_.size(); + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartitions( + int index) { + return metastorePartitions_.get(index); + } + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionsOrBuilder(int index) { + return metastorePartitions_.get(index); + } + + public static final int SKIP_EXISTING_PARTITIONS_FIELD_NUMBER = 3; + private boolean skipExistingPartitions_ = false; + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+   * set to true:
+   *  1) The server will skip existing partitions and insert only the
+   *  non-existing partitions as part of the commit.
+   *  2) The client must set the `skip_existing_partitions` field to true for
+   *  all requests in the stream.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < metastorePartitions_.size(); i++) { + output.writeMessage(2, metastorePartitions_.get(i)); + } + if (skipExistingPartitions_ != false) { + output.writeBool(3, skipExistingPartitions_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + for (int i = 0; i < metastorePartitions_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(2, metastorePartitions_.get(i)); + } + if (skipExistingPartitions_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, skipExistingPartitions_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest other = + (com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getMetastorePartitionsList().equals(other.getMetastorePartitionsList())) return false; + if (getSkipExistingPartitions() != other.getSkipExistingPartitions()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getMetastorePartitionsCount() > 0) { + hash = (37 * hash) + METASTORE_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getMetastorePartitionsList().hashCode(); + } + hash = (37 * hash) + SKIP_EXISTING_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSkipExistingPartitions()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * The top-level message sent by the client to the
+   * [Partitions.StreamMetastorePartitions][] method.
+   * It follows the default gRPC streaming maximum size of 4 MB.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest.class, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + if (metastorePartitionsBuilder_ == null) { + metastorePartitions_ = java.util.Collections.emptyList(); + } else { + metastorePartitions_ = null; + metastorePartitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipExistingPartitions_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest build() { + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest result = + new com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest result) { + if (metastorePartitionsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + metastorePartitions_ = java.util.Collections.unmodifiableList(metastorePartitions_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.metastorePartitions_ = metastorePartitions_; + } 
else { + result.metastorePartitions_ = metastorePartitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.skipExistingPartitions_ = skipExistingPartitions_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (metastorePartitionsBuilder_ == null) { + if (!other.metastorePartitions_.isEmpty()) { + if (metastorePartitions_.isEmpty()) { + metastorePartitions_ = other.metastorePartitions_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.addAll(other.metastorePartitions_); + } + onChanged(); + } + } else { + if (!other.metastorePartitions_.isEmpty()) { + if (metastorePartitionsBuilder_.isEmpty()) { + metastorePartitionsBuilder_.dispose(); + metastorePartitionsBuilder_ = null; + metastorePartitions_ = other.metastorePartitions_; + bitField0_ = (bitField0_ & ~0x00000002); + metastorePartitionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getMetastorePartitionsFieldBuilder() + : null; + } else { + metastorePartitionsBuilder_.addAllMessages(other.metastorePartitions_); + } + } + } + if (other.getSkipExistingPartitions() != false) { + setSkipExistingPartitions(other.getSkipExistingPartitions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1beta.MetastorePartition m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.parser(), + extensionRegistry); + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(m); + } else { + metastorePartitionsBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + skipExistingPartitions_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table where the partition is to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the partition is to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table where the partition is to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the partition is to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table where the partition is to be added, in the
+     * format of
+     * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List + metastorePartitions_ = java.util.Collections.emptyList(); + + private void ensureMetastorePartitionsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + metastorePartitions_ = + new java.util.ArrayList( + metastorePartitions_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + metastorePartitionsBuilder_; + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getMetastorePartitionsList() { + if (metastorePartitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(metastorePartitions_); + } else { + return metastorePartitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getMetastorePartitionsCount() { + if (metastorePartitionsBuilder_ == null) { + return metastorePartitions_.size(); + } else { + return metastorePartitionsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartitions( + int index) { + if (metastorePartitionsBuilder_ == null) { + return metastorePartitions_.get(index); + } else { + return metastorePartitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMetastorePartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.set(index, value); + onChanged(); + } else { + metastorePartitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMetastorePartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.set(index, builderForValue.build()); + onChanged(); + } else { + metastorePartitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(value); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + int index, com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(index, value); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(builderForValue.build()); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addMetastorePartitions( + int index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.add(index, builderForValue.build()); + onChanged(); + } else { + metastorePartitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllMetastorePartitions( + java.lang.Iterable + values) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, metastorePartitions_); + onChanged(); + } else { + metastorePartitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearMetastorePartitions() { + if (metastorePartitionsBuilder_ == null) { + metastorePartitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + metastorePartitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeMetastorePartitions(int index) { + if (metastorePartitionsBuilder_ == null) { + ensureMetastorePartitionsIsMutable(); + metastorePartitions_.remove(index); + onChanged(); + } else { + metastorePartitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + getMetastorePartitionsBuilder(int index) { + return getMetastorePartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionsOrBuilder(int index) { + if (metastorePartitionsBuilder_ == null) { + return metastorePartitions_.get(index); + } else { + return metastorePartitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getMetastorePartitionsOrBuilderList() { + if (metastorePartitionsBuilder_ != null) { + return metastorePartitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(metastorePartitions_); + } + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + addMetastorePartitionsBuilder() { + return getMetastorePartitionsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + addMetastorePartitionsBuilder(int index) { + return getMetastorePartitionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. A list of metastore partitions to be added to the table.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getMetastorePartitionsBuilderList() { + return getMetastorePartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getMetastorePartitionsFieldBuilder() { + if (metastorePartitionsBuilder_ == null) { + metastorePartitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder>( + metastorePartitions_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + metastorePartitions_ = null; + } + return metastorePartitionsBuilder_; + } + + private boolean skipExistingPartitions_; + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+     * set to true:
+     *  1) The server will skip existing partitions and insert only the
+     *  non-existing partitions as part of the commit.
+     *  2) The client must set the `skip_existing_partitions` field to true for
+     *  all requests in the stream.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + @java.lang.Override + public boolean getSkipExistingPartitions() { + return skipExistingPartitions_; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+     * set to true:
+     *  1) The server will skip existing partitions and insert only the
+     *  non-existing partitions as part of the commit.
+     *  2) The client must set the `skip_existing_partitions` field to true for
+     *  all requests in the stream.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The skipExistingPartitions to set. + * @return This builder for chaining. + */ + public Builder setSkipExistingPartitions(boolean value) { + + skipExistingPartitions_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+     * add_partitions(..). If the flag is set to false, the server will return
+     * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+     * set to true:
+     *  1) The server will skip existing partitions and insert only the
+     *  non-existing partitions as part of the commit.
+     *  2) The client must set the `skip_existing_partitions` field to true for
+     *  all requests in the stream.
+     * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearSkipExistingPartitions() { + bitField0_ = (bitField0_ & ~0x00000004); + skipExistingPartitions_ = false; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) + private static final com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamMetastorePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..5cdf176e1f91 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface StreamMetastorePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table where the partition is to be added, in the
+   * format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table where the partition is to be added, in the
+   * format of
+   * projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getMetastorePartitionsList(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartitions(int index); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getMetastorePartitionsCount(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getMetastorePartitionsOrBuilderList(); + + /** + * + * + *
+   * Optional. A list of metastore partitions to be added to the table.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partitions = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionsOrBuilder(int index); + + /** + * + * + *
+   * Optional. Mimics the ifNotExists flag in IMetaStoreClient
+   * add_partitions(..). If the flag is set to false, the server will return
+   * ALREADY_EXISTS on commit if any partition already exists. If the flag is
+   * set to true:
+   *  1) the server will skip existing partitions and insert only the
+   *  non-existing partitions as part of the commit.
+   *  2) The client must set the `skip_existing_partitions` field to true for
+   *  all requests in the stream.
+   * 
+ * + * bool skip_existing_partitions = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The skipExistingPartitions. + */ + boolean getSkipExistingPartitions(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java new file mode 100644 index 000000000000..27708ebee632 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java @@ -0,0 +1,687 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * This is the response message sent by the server
+ * to the client for the [Partitions.StreamMetastorePartitions][] method when
+ * the commit is successful. The server will close the stream after sending
+ * this message.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse} + */ +public final class StreamMetastorePartitionsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) + StreamMetastorePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamMetastorePartitionsResponse.newBuilder() to construct. + private StreamMetastorePartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamMetastorePartitionsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamMetastorePartitionsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse.Builder + .class); + } + + public static final int TOTAL_PARTITIONS_STREAMED_COUNT_FIELD_NUMBER = 2; + private long totalPartitionsStreamedCount_ = 0L; + + /** + * + * + *
+   * Total count of partitions streamed by the client during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return The totalPartitionsStreamedCount. + */ + @java.lang.Override + public long getTotalPartitionsStreamedCount() { + return totalPartitionsStreamedCount_; + } + + public static final int TOTAL_PARTITIONS_INSERTED_COUNT_FIELD_NUMBER = 3; + private long totalPartitionsInsertedCount_ = 0L; + + /** + * + * + *
+   * Total count of partitions inserted by the server during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return The totalPartitionsInsertedCount. + */ + @java.lang.Override + public long getTotalPartitionsInsertedCount() { + return totalPartitionsInsertedCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (totalPartitionsStreamedCount_ != 0L) { + output.writeInt64(2, totalPartitionsStreamedCount_); + } + if (totalPartitionsInsertedCount_ != 0L) { + output.writeInt64(3, totalPartitionsInsertedCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (totalPartitionsStreamedCount_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(2, totalPartitionsStreamedCount_); + } + if (totalPartitionsInsertedCount_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size(3, totalPartitionsInsertedCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse other = + (com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) obj; + + if (getTotalPartitionsStreamedCount() != other.getTotalPartitionsStreamedCount()) return false; + if (getTotalPartitionsInsertedCount() != other.getTotalPartitionsInsertedCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TOTAL_PARTITIONS_STREAMED_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTotalPartitionsStreamedCount()); + hash = (37 * hash) + TOTAL_PARTITIONS_INSERTED_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTotalPartitionsInsertedCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * This is the response message sent by the server
+   * to the client for the [Partitions.StreamMetastorePartitions][] method when
+   * the commit is successful. The server will close the stream after sending
+   * this message.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse.class, + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + totalPartitionsStreamedCount_ = 0L; + totalPartitionsInsertedCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_StreamMetastorePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse build() { + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse result = + new com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.totalPartitionsStreamedCount_ = totalPartitionsStreamedCount_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.totalPartitionsInsertedCount_ = totalPartitionsInsertedCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + .getDefaultInstance()) return this; + if (other.getTotalPartitionsStreamedCount() != 0L) { + setTotalPartitionsStreamedCount(other.getTotalPartitionsStreamedCount()); + } + if (other.getTotalPartitionsInsertedCount() != 0L) { + setTotalPartitionsInsertedCount(other.getTotalPartitionsInsertedCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 16: + { + totalPartitionsStreamedCount_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 16 + case 24: + { + totalPartitionsInsertedCount_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long totalPartitionsStreamedCount_; + + /** + * + * + *
+     * Total count of partitions streamed by the client during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return The totalPartitionsStreamedCount. + */ + @java.lang.Override + public long getTotalPartitionsStreamedCount() { + return totalPartitionsStreamedCount_; + } + + /** + * + * + *
+     * Total count of partitions streamed by the client during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @param value The totalPartitionsStreamedCount to set. + * @return This builder for chaining. + */ + public Builder setTotalPartitionsStreamedCount(long value) { + + totalPartitionsStreamedCount_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Total count of partitions streamed by the client during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return This builder for chaining. + */ + public Builder clearTotalPartitionsStreamedCount() { + bitField0_ = (bitField0_ & ~0x00000001); + totalPartitionsStreamedCount_ = 0L; + onChanged(); + return this; + } + + private long totalPartitionsInsertedCount_; + + /** + * + * + *
+     * Total count of partitions inserted by the server during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return The totalPartitionsInsertedCount. + */ + @java.lang.Override + public long getTotalPartitionsInsertedCount() { + return totalPartitionsInsertedCount_; + } + + /** + * + * + *
+     * Total count of partitions inserted by the server during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @param value The totalPartitionsInsertedCount to set. + * @return This builder for chaining. + */ + public Builder setTotalPartitionsInsertedCount(long value) { + + totalPartitionsInsertedCount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Total count of partitions inserted by the server during the lifetime of the
+     * stream. This is only set in the final response message before closing the
+     * stream.
+     * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return This builder for chaining. + */ + public Builder clearTotalPartitionsInsertedCount() { + bitField0_ = (bitField0_ & ~0x00000002); + totalPartitionsInsertedCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) + private static final com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamMetastorePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..99fc1a3496f0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface StreamMetastorePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.StreamMetastorePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Total count of partitions streamed by the client during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_streamed_count = 2; + * + * @return The totalPartitionsStreamedCount. + */ + long getTotalPartitionsStreamedCount(); + + /** + * + * + *
+   * Total count of partitions inserted by the server during the lifetime of the
+   * stream. This is only set in the final response message before closing the
+   * stream.
+   * 
+ * + * int64 total_partitions_inserted_count = 3; + * + * @return The totalPartitionsInsertedCount. + */ + long getTotalPartitionsInsertedCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java new file mode 100644 index 000000000000..2af3750208e6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java @@ -0,0 +1,217 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class TableName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/datasets/{dataset}/tables/{table}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + + @Deprecated + protected TableName() { + project = null; + dataset = null; + table = null; + } + + private TableName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static TableName of(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build(); + } + + public static String format(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build().toString(); + } + + public static TableName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE.validatedMatch( + formattedString, "TableName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("dataset"), matchMap.get("table")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + 
list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (TableName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE.instantiate( + "project", project, "dataset", dataset, "table", table); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + TableName that = ((TableName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + private Builder(TableName tableName) { + this.project = tableName.project; + this.dataset = tableName.dataset; + this.table = tableName.table; + } + + public TableName build() { + return new TableName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java new file mode 100644 index 000000000000..09621576b388 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java @@ -0,0 +1,1077 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +/** + * + * + *
+ * Request message for UpdateMetastorePartition.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest} + */ +public final class UpdateMetastorePartitionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) + UpdateMetastorePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use UpdateMetastorePartitionRequest.newBuilder() to construct. + private UpdateMetastorePartitionRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private UpdateMetastorePartitionRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new UpdateMetastorePartitionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder.class); + } + + private int bitField0_; + public static final int METASTORE_PARTITION_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta.MetastorePartition metastorePartition_; + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + @java.lang.Override + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartition() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getMetastorePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getMetastorePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest other = + (com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) obj; + + if (hasMetastorePartition() != other.hasMetastorePartition()) return false; + if (hasMetastorePartition()) { + if (!getMetastorePartition().equals(other.getMetastorePartition())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMetastorePartition()) { + hash = (37 * hash) + METASTORE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getMetastorePartition().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for UpdateMetastorePartition.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.class, + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getMetastorePartitionFieldBuilder(); + getUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta.MetastorePartitionServiceProto + .internal_static_google_cloud_bigquery_storage_v1beta_UpdateMetastorePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest build() { + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest result = + new com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000001) != 0)) { + result.metastorePartition_ = + metastorePartitionBuilder_ == null + ? metastorePartition_ + : metastorePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + .getDefaultInstance()) return this; + if (other.hasMetastorePartition()) { + mergeMetastorePartition(other.getMetastorePartition()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getMetastorePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta.MetastorePartition metastorePartition_; + private 
com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + metastorePartitionBuilder_; + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + public boolean hasMetastorePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartition() { + if (metastorePartitionBuilder_ == null) { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } else { + return metastorePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metastorePartition_ = value; + } else { + metastorePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMetastorePartition( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder builderForValue) { + if (metastorePartitionBuilder_ == null) { + metastorePartition_ = builderForValue.build(); + } else { + metastorePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeMetastorePartition( + com.google.cloud.bigquery.storage.v1beta.MetastorePartition value) { + if (metastorePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && metastorePartition_ != null + && metastorePartition_ + != com.google.cloud.bigquery.storage.v1beta.MetastorePartition + .getDefaultInstance()) { + getMetastorePartitionBuilder().mergeFrom(value); + } else { + metastorePartition_ = value; + } + } else { + metastorePartitionBuilder_.mergeFrom(value); + } + if (metastorePartition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearMetastorePartition() { + bitField0_ = (bitField0_ & ~0x00000001); + metastorePartition_ = null; + if (metastorePartitionBuilder_ != null) { + metastorePartitionBuilder_.dispose(); + metastorePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder + getMetastorePartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getMetastorePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder() { + if (metastorePartitionBuilder_ != null) { + return metastorePartitionBuilder_.getMessageOrBuilder(); + } else { + return metastorePartition_ == null + ? com.google.cloud.bigquery.storage.v1beta.MetastorePartition.getDefaultInstance() + : metastorePartition_; + } + } + + /** + * + * + *
+     * Required. The metastore partition to be updated.
+     * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder> + getMetastorePartitionFieldBuilder() { + if (metastorePartitionBuilder_ == null) { + metastorePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta.MetastorePartition, + com.google.cloud.bigquery.storage.v1beta.MetastorePartition.Builder, + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder>( + getMetastorePartition(), getParentForChildren(), isClean()); + metastorePartition_ = null; + } + return metastorePartitionBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
+     * Optional. The list of fields to update.
+     * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) + private static final com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateMetastorePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..95fe6effb2a0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta/metastore_partition.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta; + +public interface UpdateMetastorePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta.UpdateMetastorePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the metastorePartition field is set. + */ + boolean hasMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The metastorePartition. + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartition getMetastorePartition(); + + /** + * + * + *
+   * Required. The metastore partition to be updated.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta.MetastorePartition metastore_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta.MetastorePartitionOrBuilder + getMetastorePartitionOrBuilder(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
+   * Optional. The list of fields to update.
+   * </pre>
+ * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/proto/google/cloud/bigquery/storage/v1beta/metastore_partition.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/proto/google/cloud/bigquery/storage/v1beta/metastore_partition.proto new file mode 100644 index 000000000000..75cd43fb7c5a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/proto/google/cloud/bigquery/storage/v1beta/metastore_partition.proto @@ -0,0 +1,313 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1beta/partition.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1Beta"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "MetastorePartitionServiceProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1beta"; +option (google.api.resource_definition) = { + type: "bigquery.googleapis.com/Table" + pattern: "projects/{project}/datasets/{dataset}/tables/{table}" +}; + +// BigQuery Metastore Partition Service API. +// This service is used for managing metastore partitions in BigQuery +// metastore. The service supports only batch operations for write. +service MetastorePartitionService { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform"; + + // Adds metastore partitions to a table. + rpc BatchCreateMetastorePartitions(BatchCreateMetastorePartitionsRequest) + returns (BatchCreateMetastorePartitionsResponse) { + option (google.api.http) = { + post: "/v1beta/{parent=projects/*/datasets/*/tables/*}/partitions:batchCreate" + body: "*" + }; + } + + // Deletes metastore partitions from a table. + rpc BatchDeleteMetastorePartitions(BatchDeleteMetastorePartitionsRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1beta/{parent=projects/*/datasets/*/tables/*}/partitions:batchDelete" + body: "*" + }; + } + + // Updates metastore partitions in a table. 
+ rpc BatchUpdateMetastorePartitions(BatchUpdateMetastorePartitionsRequest) + returns (BatchUpdateMetastorePartitionsResponse) { + option (google.api.http) = { + post: "/v1beta/{parent=projects/*/datasets/*/tables/*}/partitions:batchUpdate" + body: "*" + }; + } + + // Gets metastore partitions from a table. + rpc ListMetastorePartitions(ListMetastorePartitionsRequest) + returns (ListMetastorePartitionsResponse) { + option (google.api.http) = { + get: "/v1beta/{parent=projects/*/locations/*/datasets/*/tables/*}/partitions:list" + }; + option (google.api.method_signature) = "parent"; + } + + // This is a bi-di streaming rpc method that allows the client to send + // a stream of partitions and commit all of them atomically at the end. + // If the commit is successful, the server will return a + // response and close the stream. If the commit fails (due to duplicate + // partitions or other reason), the server will close the stream with an + // error. This method is only available via the gRPC API (not REST). + rpc StreamMetastorePartitions(stream StreamMetastorePartitionsRequest) + returns (stream StreamMetastorePartitionsResponse) {} +} + +// Request message for CreateMetastorePartition. The MetastorePartition is +// uniquely identified by values, which is an ordered list. Hence, there is no +// separate name or partition id field. +message CreateMetastorePartitionRequest { + // Required. Reference to the table to where the metastore partition to be + // added, in the format of + // projects/{project}/databases/{databases}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Required. The metastore partition to be added. + MetastorePartition metastore_partition = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for BatchCreateMetastorePartitions. +message BatchCreateMetastorePartitionsRequest { + // Required. Reference to the table to where the metastore partitions to be + // added, in the format of + // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Required. Requests to add metastore partitions to the table. + repeated CreateMetastorePartitionRequest requests = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. Mimics the ifNotExists flag in IMetaStoreClient + // add_partitions(..). If the flag is set to false, the server will return + // ALREADY_EXISTS if any partition already exists. If the flag is set to true, + // the server will skip existing partitions and insert only the non-existing + // partitions. A maximum of 900 partitions can be inserted in a batch. + bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Optional trace id to be used for debugging. It is expected that + // the client sets the same `trace_id` for all the batches in the same + // operation, so that it is possible to tie together the logs to all the + // batches in the same operation. Limited to 256 characters. This is expected, + // but not required, to be globally unique. + string trace_id = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for BatchCreateMetastorePartitions. +message BatchCreateMetastorePartitionsResponse { + // The list of metastore partitions that have been created. 
+ repeated MetastorePartition partitions = 1; +} + +// Request message for BatchDeleteMetastorePartitions. The MetastorePartition is +// uniquely identified by values, which is an ordered list. Hence, there is no +// separate name or partition id field. +message BatchDeleteMetastorePartitionsRequest { + // Required. Reference to the table to which these metastore partitions + // belong, in the format of + // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Required. The list of metastore partitions (identified by its values) to be + // deleted. A maximum of 900 partitions can be deleted in a batch. + repeated MetastorePartitionValues partition_values = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. Optional trace id to be used for debugging. It is expected that + // the client sets the same `trace_id` for all the batches in the same + // operation, so that it is possible to tie together the logs to all the + // batches in the same operation. This is expected, but not required, to be + // globally unique. + string trace_id = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for UpdateMetastorePartition. +message UpdateMetastorePartitionRequest { + // Required. The metastore partition to be updated. + MetastorePartition metastore_partition = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. The list of fields to update. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Request message for BatchUpdateMetastorePartitions. +message BatchUpdateMetastorePartitionsRequest { + // Required. Reference to the table to which these metastore partitions + // belong, in the format of + // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Required. Requests to update metastore partitions in the table. + repeated UpdateMetastorePartitionRequest requests = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. Optional trace id to be used for debugging. It is expected that + // the client sets the same `trace_id` for all the batches in the same + // operation, so that it is possible to tie together the logs to all the + // batches in the same operation. This is expected, but not required, to be + // globally unique. + string trace_id = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for BatchUpdateMetastorePartitions. +message BatchUpdateMetastorePartitionsResponse { + // The list of metastore partitions that have been updated. + // A maximum of 900 partitions can be updated in a batch. + repeated MetastorePartition partitions = 1; +} + +// Request message for ListMetastorePartitions. +message ListMetastorePartitionsRequest { + // Required. Reference to the table to which these metastore partitions + // belong, in the format of + // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Optional. SQL text filtering statement, similar to a WHERE clause in a + // query. Only supports single-row expressions. Aggregate functions are not + // supported. 
+ // + // Examples: + // * "int_field > 5" + // * "date_field = CAST('2014-9-27' as DATE)" + // * "nullable_field is not NULL" + // * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" + // * "numeric_field BETWEEN 1.0 AND 5.0" + // + // Restricted to a maximum length of 1 MB. + string filter = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Optional trace id to be used for debugging. It is expected that + // the client sets the same `trace_id` for all the batches in the same + // operation, so that it is possible to tie together the logs to all the + // batches in the same operation. Limited to 256 characters. This is expected, + // but not required, to be globally unique. + string trace_id = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response message for ListMetastorePartitions. +message ListMetastorePartitionsResponse { + // The response depends on the number of metastore partitions to be returned; + // it can be a list of partitions or a list of + // [ReadStream]((https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1#readstream)) + // objects. For the second situation, the BigQuery [Read API + // ReadRows](https://cloud.google.com/bigquery/docs/reference/storage#read_from_a_session_stream) + // method must be used to stream the data and convert it into a list of + // partitions. + oneof response { + // The list of partitions. + MetastorePartitionList partitions = 1; + + // The list of streams. + StreamList streams = 2; + } +} + +// The top-level message sent by the client to the +// [Partitions.StreamMetastorePartitions][] method. +// Follows the default gRPC streaming maximum size of 4 MB. +message StreamMetastorePartitionsRequest { + // Required. Reference to the table to where the partition to be added, in the + // format of + // projects/{project}/locations/{location}/datasets/{dataset}/tables/{table}. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" } + ]; + + // Optional. A list of metastore partitions to be added to the table. + repeated MetastorePartition metastore_partitions = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Mimics the ifNotExists flag in IMetaStoreClient + // add_partitions(..). If the flag is set to false, the server will return + // ALREADY_EXISTS on commit if any partition already exists. If the flag is + // set to true: + // 1) the server will skip existing partitions + // insert only the non-existing partitions as part of the commit. + // 2) The client must set the `skip_existing_partitions` field to true for + // all requests in the stream. + bool skip_existing_partitions = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// This is the response message sent by the server +// to the client for the [Partitions.StreamMetastorePartitions][] method when +// the commit is successful. Server will close the stream after sending this +// message. +message StreamMetastorePartitionsResponse { + // Total count of partitions streamed by the client during the lifetime of the + // stream. This is only set in the final response message before closing the + // stream. + int64 total_partitions_streamed_count = 2; + + // Total count of partitions inserted by the server during the lifetime of the + // stream. This is only set in the final response message before closing the + // stream. + int64 total_partitions_inserted_count = 3; +} + +// Structured custom error message for batch size too large error. 
+// The error can be attached as error details in the returned rpc Status for +// more structured error handling in the client. +message BatchSizeTooLargeError { + // The maximum number of items that are supported in a single batch. This is + // returned as a hint to the client to adjust the batch size. + int64 max_batch_size = 1; + + // Optional. The error message that is returned to the client. + string error_message = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/proto/google/cloud/bigquery/storage/v1beta/partition.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/proto/google/cloud/bigquery/storage/v1beta/partition.proto new file mode 100644 index 000000000000..a4bf07547391 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta/src/main/proto/google/cloud/bigquery/storage/v1beta/partition.proto @@ -0,0 +1,140 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1Beta"; +option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "MetastorePartitionProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1beta"; + +// Schema description of a metastore partition column. +message FieldSchema { + // Required. The name of the column. + // The maximum length of the name is 1024 characters + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The type of the metastore partition column. Maximum allowed + // length is 1024 characters. + string type = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Contains information about the physical storage of the data in the metastore +// partition. +message StorageDescriptor { + // Optional. The physical location of the metastore partition + // (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or + // `gs://spark-dataproc-data/pangea-data/*`). + string location_uri = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the fully qualified class name of the InputFormat + // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"). + // The maximum length is 128 characters. + string input_format = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the fully qualified class name of the OutputFormat + // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). + // The maximum length is 128 characters. + string output_format = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Serializer and deserializer information. 
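+  // For example (an illustrative value, mirroring the ORC input/output formats
+  // named above), an ORC-backed partition would typically set the
+  // serialization_library to "org.apache.hadoop.hive.ql.io.orc.OrcSerde".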
+ SerDeInfo serde_info = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Serializer and deserializer information. +message SerDeInfo { + // Optional. Name of the SerDe. + // The maximum length is 256 characters. + string name = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Specifies a fully-qualified class name of the serialization + // library that is responsible for the translation of data between table + // representation and the underlying low-level input and output format + // structures. The maximum length is 256 characters. + string serialization_library = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Key-value pairs that define the initialization parameters for the + // serialization library. + // Maximum size 10 Kib. + map parameters = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information about a Hive partition. +message MetastorePartition { + // Required. Represents the values of the partition keys, where each value + // corresponds to a specific partition key in the order in which the keys are + // defined. Each value is limited to 1024 characters. + repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The creation time of the partition. + google.protobuf.Timestamp create_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Contains information about the physical storage of the data in + // the partition. + StorageDescriptor storage_descriptor = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional parameters or metadata associated with the partition. + // Maximum size 10 KiB. + map parameters = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. List of columns. + repeated FieldSchema fields = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// List of metastore partitions. +message MetastorePartitionList { + // Required. List of partitions. + repeated MetastorePartition partitions = 1 + [(google.api.field_behavior) = REQUIRED]; +} + +// Information about a single stream that is used to read partitions. +message ReadStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadStream" + pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}" + plural: "readStreams" + singular: "readStream" + }; + + // Output only. Identifier. Name of the stream, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. + string name = 1 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.field_behavior) = IDENTIFIER + ]; +} + +// List of streams. +message StreamList { + // Output only. List of streams. + repeated ReadStream streams = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Represents the values of a metastore partition. +message MetastorePartitionValues { + // Required. The values of the partition keys, where each value corresponds to + // a specific partition key in the order in which the keys are defined. 
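+  // For example (illustrative), a table partitioned by (year, month) would
+  // identify its January 2024 partition with the values ["2024", "01"].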
+ repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/clirr-ignored-differences.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..5fe0b05937ef --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/clirr-ignored-differences.xml @@ -0,0 +1,81 @@ + + + + + 7012 + com/google/cloud/bigquery/storage/v1beta1/Storage$ReadRowsResponseOrBuilder + * *Arrow*(*) + + + 7012 + com/google/cloud/bigquery/storage/v1beta1/Storage$ReadRowsResponseOrBuilder + * *Avro*(*) + + + 7012 + com/google/cloud/bigquery/storage/v1beta1/Storage$ReadRowsResponseOrBuilder + * getSchemaCase() + + + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * clear() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * clearField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * clearOneof(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * clone() + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * setField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/cloud/bigquery/storage/v1beta1/** + * setUnknownFields(*) + ** + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/pom.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/pom.xml new file mode 100644 index 000000000000..962b23b148ba --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -0,0 +1,42 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta1 + 0.191.1 + proto-google-cloud-bigquerystorage-v1beta1 + PROTO library for proto-google-cloud-bigquerystorage-v1beta1 + + com.google.cloud + google-cloud-bigquerystorage-parent + 3.19.1 + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java new file mode 100644 index 000000000000..bfea7beff314 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java @@ -0,0 +1,1321 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta1/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta1; + +public final class ArrowProto { + private ArrowProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface ArrowSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.ArrowSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * IPC serialized Arrow schema.
+     * </pre>
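+     *
+     * <p>A minimal decoding sketch (illustrative only; it assumes the Apache
+     * Arrow Java IPC helpers, which are not a dependency of this module):
+     * <pre>
+     * org.apache.arrow.vector.types.pojo.Schema schema =
+     *     org.apache.arrow.vector.ipc.message.MessageSerializer.deserializeSchema(
+     *         new org.apache.arrow.vector.ipc.ReadChannel(
+     *             java.nio.channels.Channels.newChannel(
+     *                 new java.io.ByteArrayInputStream(getSerializedSchema().toByteArray()))));
+     * </pre>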
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + com.google.protobuf.ByteString getSerializedSchema(); + } + + /** + * + * + *
+   * Arrow schema.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ArrowSchema} + */ + public static final class ArrowSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.ArrowSchema) + ArrowSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowSchema.newBuilder() to construct. + private ArrowSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowSchema() { + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder.class); + } + + public static final int SERIALIZED_SCHEMA_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * IPC serialized Arrow schema.
+     * </pre>
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedSchema_.isEmpty()) { + output.writeBytes(1, serializedSchema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedSchema_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema other = + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) obj; + + if (!getSerializedSchema().equals(other.getSerializedSchema())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSerializedSchema().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ArrowSchema} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.ArrowSchema) + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema build() { + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema result = + new com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedSchema_ = serializedSchema_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + 
java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance()) return this; + if (other.getSerializedSchema() != com.google.protobuf.ByteString.EMPTY) { + setSerializedSchema(other.getSerializedSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedSchema_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedSchema_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+       * IPC serialized Arrow schema.
+       * </pre>
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + /** + * + * + *
+       * IPC serialized Arrow schema.
+       * </pre>
+ * + * bytes serialized_schema = 1; + * + * @param value The serializedSchema to set. + * @return This builder for chaining. + */ + public Builder setSerializedSchema(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedSchema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * IPC serialized Arrow schema.
+       * </pre>
+ * + * bytes serialized_schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedSchema() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedSchema_ = getDefaultInstance().getSerializedSchema(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.ArrowSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.ArrowSchema) + private static final com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ArrowRecordBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * IPC serialized Arrow RecordBatch.
+     * </pre>
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + com.google.protobuf.ByteString getSerializedRecordBatch(); + + /** + * + * + *
+     * The count of rows in the returned block.
+     * </pre>
+ * + * int64 row_count = 2; + * + * @return The rowCount. + */ + long getRowCount(); + } + + /** + * + * + *
+   * Arrow RecordBatch.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch} + */ + public static final class ArrowRecordBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch) + ArrowRecordBatchOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowRecordBatch.newBuilder() to construct. + private ArrowRecordBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowRecordBatch() { + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowRecordBatch(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder.class); + } + + public static final int SERIALIZED_RECORD_BATCH_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * IPC serialized Arrow RecordBatch.
+     * </pre>
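+     *
+     * <p>A minimal decoding sketch (illustrative only; it assumes the Apache
+     * Arrow Java IPC helpers and a caller-managed BufferAllocator named
+     * {@code allocator}):
+     * <pre>
+     * org.apache.arrow.vector.ipc.message.ArrowRecordBatch batch =
+     *     org.apache.arrow.vector.ipc.message.MessageSerializer.deserializeRecordBatch(
+     *         new org.apache.arrow.vector.ipc.ReadChannel(
+     *             java.nio.channels.Channels.newChannel(
+     *                 new java.io.ByteArrayInputStream(
+     *                     getSerializedRecordBatch().toByteArray()))),
+     *         allocator);
+     * </pre>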
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + public static final int ROW_COUNT_FIELD_NUMBER = 2; + private long rowCount_ = 0L; + + /** + * + * + *
+     * The count of rows in the returned block.
+     * </pre>
+ * + * int64 row_count = 2; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedRecordBatch_.isEmpty()) { + output.writeBytes(1, serializedRecordBatch_); + } + if (rowCount_ != 0L) { + output.writeInt64(2, rowCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedRecordBatch_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedRecordBatch_); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, rowCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch other = + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) obj; + + if (!getSerializedRecordBatch().equals(other.getSerializedRecordBatch())) return false; + if (getRowCount() != other.getRowCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRecordBatch().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Arrow RecordBatch.
+     * 
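+     *
+     * Editor's note: a minimal construction sketch (illustrative only;
+     * "ipcBytes" is a placeholder for caller-provided Arrow IPC data as a
+     * com.google.protobuf.ByteString):
+     *
+     *   ArrowProto.ArrowRecordBatch batch =
+     *       ArrowProto.ArrowRecordBatch.newBuilder()
+     *           .setSerializedRecordBatch(ipcBytes)
+     *           .setRowCount(1024L)
+     *           .build();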
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch) + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + rowCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch build() { + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch result = + new com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedRecordBatch_ = serializedRecordBatch_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowCount_ = rowCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance()) return this; + if (other.getSerializedRecordBatch() != com.google.protobuf.ByteString.EMPTY) { + setSerializedRecordBatch(other.getSerializedRecordBatch()); + } + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedRecordBatch_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+       * IPC serialized Arrow RecordBatch.
+       * 
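+       *
+       * Editor's note: decoding these bytes requires the Apache Arrow Java
+       * library, which is outside this file; class locations vary across
+       * Arrow versions, so treat the names below as assumptions rather
+       * than a definitive recipe:
+       *
+       *   org.apache.arrow.vector.ipc.message.ArrowRecordBatch ipcBatch =
+       *       MessageSerializer.deserializeRecordBatch(
+       *           new ReadChannel(new ByteArrayReadableSeekableByteChannel(
+       *               getSerializedRecordBatch().toByteArray())),
+       *           allocator); // allocator: a caller-owned BufferAllocator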
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + /** + * + * + *
+       * IPC serialized Arrow RecordBatch.
+       * 
+ * + * bytes serialized_record_batch = 1; + * + * @param value The serializedRecordBatch to set. + * @return This builder for chaining. + */ + public Builder setSerializedRecordBatch(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedRecordBatch_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * IPC serialized Arrow RecordBatch.
+       * 
+ * + * bytes serialized_record_batch = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRecordBatch() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedRecordBatch_ = getDefaultInstance().getSerializedRecordBatch(); + onChanged(); + return this; + } + + private long rowCount_; + + /** + * + * + *
+       * The number of rows in the returned block.
+       * 
+ * + * int64 row_count = 2; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+       * The number of rows in the returned block.
+       * 
+ * + * int64 row_count = 2; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * The number of rows in the returned block.
+       * 
+ * + * int64 row_count = 2; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000002); + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch) + private static final com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowRecordBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n1google/cloud/bigquery/storage/v1beta1/" + + "arrow.proto\022%google.cloud.bigquery.stora" + + "ge.v1beta1\"(\n\013ArrowSchema\022\031\n\021serialized_" + + "schema\030\001 \001(\014\"F\n\020ArrowRecordBatch\022\037\n\027seri" + + "alized_record_batch\030\001 
\001(\014\022\021\n\trow_count\030\002" + + " \001(\003B|\n)com.google.cloud.bigquery.storag" + + "e.v1beta1B\nArrowProtoZCcloud.google.com/" + + "go/bigquery/storage/apiv1beta1/storagepb" + + ";storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowSchema_descriptor, + new java.lang.String[] { + "SerializedSchema", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_ArrowRecordBatch_descriptor, + new java.lang.String[] { + "SerializedRecordBatch", "RowCount", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java new file mode 100644 index 000000000000..fd8163fc50a1 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java @@ -0,0 +1,1417 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta1/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta1; + +public final class AvroProto { + private AvroProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface AvroSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.AvroSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html
+     * 
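+     *
+     * Editor's note: an illustrative sketch assuming the Apache Avro
+     * library (org.apache.avro) is on the classpath; the returned JSON
+     * string parses with Avro's standard schema parser:
+     *
+     *   org.apache.avro.Schema avroSchema =
+     *       new org.apache.avro.Schema.Parser().parse(getSchema());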
+     *
+     * string schema = 1;
+     *
+     * @return The schema.
+     */
+    java.lang.String getSchema();
+
+    /**
+     *
+     *
+     *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html
+     * 
+     *
+     * string schema = 1;
+     *
+     * @return The bytes for schema.
+     */
+    com.google.protobuf.ByteString getSchemaBytes();
+  }
+
+  /**
+   *
+   *
+   *
+   * Avro schema.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.AvroSchema} + */ + public static final class AvroSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.AvroSchema) + AvroSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroSchema.newBuilder() to construct. + private AvroSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroSchema() { + schema_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.class, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder.class); + } + + public static final int SCHEMA_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object schema_ = ""; + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html
+     * 
+ * + * string schema = 1; + * + * @return The schema. + */ + @java.lang.Override + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } + } + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html
+     * 
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(schema_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, schema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(schema_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, schema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema other = + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) obj; + + if (!getSchema().equals(other.getSchema())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Avro schema.
+     * 
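+     *
+     * Editor's note: a minimal usage sketch ("schemaJson" is a placeholder
+     * for a caller-supplied Avro schema string):
+     *
+     *   AvroProto.AvroSchema schema =
+     *       AvroProto.AvroSchema.newBuilder()
+     *           .setSchema(schemaJson)
+     *           .build();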
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.AvroSchema} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.AvroSchema) + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.class, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + schema_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema build() { + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema result = + new com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.schema_ = schema_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + 
public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance()) + return this; + if (!other.getSchema().isEmpty()) { + schema_ = other.schema_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + schema_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object schema_ = ""; + + /** + * + * + *
+       * Json serialized schema, as described at
+       * https://avro.apache.org/docs/1.8.1/spec.html
+       * 
+ * + * string schema = 1; + * + * @return The schema. + */ + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Json serialized schema, as described at
+       * https://avro.apache.org/docs/1.8.1/spec.html
+       * 
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Json serialized schema, as described at
+       * https://avro.apache.org/docs/1.8.1/spec.html
+       * 
+ * + * string schema = 1; + * + * @param value The schema to set. + * @return This builder for chaining. + */ + public Builder setSchema(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Json serialized schema, as described at
+       * https://avro.apache.org/docs/1.8.1/spec.html
+       * 
+ * + * string schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSchema() { + schema_ = getDefaultInstance().getSchema(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Json serialized schema, as described at
+       * https://avro.apache.org/docs/1.8.1/spec.html
+       * 
+ * + * string schema = 1; + * + * @param value The bytes for schema to set. + * @return This builder for chaining. + */ + public Builder setSchemaBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + schema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.AvroSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.AvroSchema) + private static final com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AvroRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.AvroRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * 
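+     *
+     * Editor's note: an illustrative decoding sketch, assuming the Apache
+     * Avro library and a "readerSchema" parsed from the accompanying
+     * AvroSchema message. The bytes hold concatenated binary-encoded
+     * records, so one decoder is drained to the end:
+     *
+     *   GenericDatumReader<GenericRecord> reader =
+     *       new GenericDatumReader<>(readerSchema);
+     *   BinaryDecoder decoder = DecoderFactory.get()
+     *       .binaryDecoder(getSerializedBinaryRows().toByteArray(), null);
+     *   while (!decoder.isEnd()) {
+     *     GenericRecord record = reader.read(null, decoder);
+     *   }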
+     *
+     * bytes serialized_binary_rows = 1;
+     *
+     * @return The serializedBinaryRows.
+     */
+    com.google.protobuf.ByteString getSerializedBinaryRows();
+
+    /**
+     *
+     *
+     *
+     * The number of rows in the returned block.
+     * 
+     *
+     * int64 row_count = 2;
+     *
+     * @return The rowCount.
+     */
+    long getRowCount();
+  }
+
+  /**
+   *
+   *
+   *
+   * Avro rows.
+   * 
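+   *
+   * Editor's note: like any protobuf message, AvroRows round-trips over
+   * the wire, which is handy in tests ("original" is a placeholder
+   * instance):
+   *
+   *   AvroProto.AvroRows copy =
+   *       AvroProto.AvroRows.parseFrom(original.toByteArray());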
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.AvroRows} + */ + public static final class AvroRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.AvroRows) + AvroRowsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroRows.newBuilder() to construct. + private AvroRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroRows() { + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroRows(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.class, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder.class); + } + + public static final int SERIALIZED_BINARY_ROWS_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * 
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + public static final int ROW_COUNT_FIELD_NUMBER = 2; + private long rowCount_ = 0L; + + /** + * + * + *
+     * The number of rows in the returned block.
+     * 
+ * + * int64 row_count = 2; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedBinaryRows_.isEmpty()) { + output.writeBytes(1, serializedBinaryRows_); + } + if (rowCount_ != 0L) { + output.writeInt64(2, rowCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedBinaryRows_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedBinaryRows_); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, rowCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows other = + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) obj; + + if (!getSerializedBinaryRows().equals(other.getSerializedBinaryRows())) return false; + if (getRowCount() != other.getRowCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_BINARY_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedBinaryRows().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Avro rows.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.AvroRows} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.AvroRows) + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.class, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + rowCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows build() { + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows result = + new com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedBinaryRows_ = serializedBinaryRows_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowCount_ = rowCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance()) + return this; + if (other.getSerializedBinaryRows() != com.google.protobuf.ByteString.EMPTY) { + setSerializedBinaryRows(other.getSerializedBinaryRows()); + } + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedBinaryRows_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+       * Binary serialized rows in a block.
+       * 
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + /** + * + * + *
+       * Binary serialized rows in a block.
+       * 
+ * + * bytes serialized_binary_rows = 1; + * + * @param value The serializedBinaryRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedBinaryRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedBinaryRows_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Binary serialized rows in a block.
+       * 
+ * + * bytes serialized_binary_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedBinaryRows() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedBinaryRows_ = getDefaultInstance().getSerializedBinaryRows(); + onChanged(); + return this; + } + + private long rowCount_; + + /** + * + * + *
+       * The number of rows in the returned block.
+       * 
+ * + * int64 row_count = 2; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+       * The number of rows in the returned block.
+       * 
+ * + * int64 row_count = 2; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * The number of rows in the returned block.
+       * 
+ * + * int64 row_count = 2; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000002); + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.AvroRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.AvroRows) + private static final com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n0google/cloud/bigquery/storage/v1beta1/" + + "avro.proto\022%google.cloud.bigquery.storag" + + "e.v1beta1\"\034\n\nAvroSchema\022\016\n\006schema\030\001 \001(\t\"" + + "=\n\010AvroRows\022\036\n\026serialized_binary_rows\030\001 " + + "\001(\014\022\021\n\trow_count\030\002 \001(\003B{\n)com.google.clo" + + 
"ud.bigquery.storage.v1beta1B\tAvroProtoZC" + + "cloud.google.com/go/bigquery/storage/api" + + "v1beta1/storagepb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_AvroSchema_descriptor, + new java.lang.String[] { + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_AvroRows_descriptor, + new java.lang.String[] { + "SerializedBinaryRows", "RowCount", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java new file mode 100644 index 000000000000..0d6ace761289 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ProjectName implements ResourceName { + private static final PathTemplate PROJECT = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private volatile Map<String, String> fieldValuesMap; + private final String project; + + @Deprecated + protected ProjectName() { + project = null; + } + + private ProjectName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ProjectName of(String project) { + return newBuilder().setProject(project).build(); + } + + public static String format(String project) { + return newBuilder().setProject(project).build().toString(); + } + + public static ProjectName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map<String, String> matchMap = + PROJECT.validatedMatch( + formattedString, "ProjectName.parse: formattedString not in valid format"); + return of(matchMap.get("project")); + } + + public static List<ProjectName> parseList(List<String> formattedStrings) { + List<ProjectName> list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List<String> toStringList(List<ProjectName> values) { + List<String> list = new ArrayList<>(values.size()); + for (ProjectName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT.matches(formattedString); + } + + @Override + public Map<String, String> getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder<String, String> fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT.instantiate("project", project); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ProjectName that = ((ProjectName) o); + return Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for projects/{project}.
*/ + public static class Builder { + private String project; + + protected Builder() {} + + public String getProject() { + return project; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + private Builder(ProjectName projectName) { + this.project = projectName.project; + } + + public ProjectName build() { + return new ProjectName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java new file mode 100644 index 000000000000..ca44ba83f07c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java @@ -0,0 +1,1943 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta1/read_options.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta1; + +public final class ReadOptions { + private ReadOptions() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface TableReadOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.TableReadOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
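+     *
+     * A minimal builder sketch (illustrative only; it assumes the generated
+     * TableReadOptions.Builder in this file and the example schema above):
+     *
+     *   TableReadOptions options =
+     *       TableReadOptions.newBuilder()
+     *           .addSelectedFields("struct_field.string_field1")
+     *           .build();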
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + java.util.List<java.lang.String> getSelectedFieldsList(); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + int getSelectedFieldsCount(); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + java.lang.String getSelectedFields(int index); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + com.google.protobuf.ByteString getSelectedFieldsBytes(int index); + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in
+     * a SQL query. Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
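+     *
+     * A minimal builder sketch (illustrative only; "int_field > 5" is taken
+     * from the examples above, and the setter follows the standard generated
+     * Builder pattern):
+     *
+     *   TableReadOptions options =
+     *       TableReadOptions.newBuilder()
+     *           .setRowRestriction("int_field > 5")
+     *           .build();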
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + java.lang.String getRowRestriction(); + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in
+     * a SQL query. Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + com.google.protobuf.ByteString getRowRestrictionBytes(); + } + + /** + * + * + *
+   * Options dictating how we read a table.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.TableReadOptions} + */ + public static final class TableReadOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.TableReadOptions) + TableReadOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableReadOptions.newBuilder() to construct. + private TableReadOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableReadOptions() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + rowRestriction_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableReadOptions(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.ReadOptions + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.ReadOptions + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder.class); + } + + public static final int SELECTED_FIELDS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList selectedFields_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + return selectedFields_; + } + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + + /** + * + * + *
+     * Optional. The names of the fields in the table to be returned. If no
+     * field names are specified, then all fields in the table are returned.
+     *
+     * Nested fields -- the child elements of a STRUCT field -- can be selected
+     * individually using their fully-qualified names, and will be returned as
+     * record fields containing only the selected nested fields. If a STRUCT
+     * field is specified in the selected fields list, all of the child elements
+     * will be returned.
+     *
+     * As an example, consider a table with the following schema:
+     *
+     *   {
+     *       "name": "struct_field",
+     *       "type": "RECORD",
+     *       "mode": "NULLABLE",
+     *       "fields": [
+     *           {
+     *               "name": "string_field1",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           },
+     *           {
+     *               "name": "string_field2",
+     *               "type": "STRING",
+     *               "mode": "NULLABLE"
+     *           }
+     *       ]
+     *   }
+     *
+     * Specifying "struct_field" in the selected fields list will result in a
+     * read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *       string_field2
+     *   }
+     *
+     * Specifying "struct_field.string_field1" in the selected fields list will
+     * result in a read session schema with the following logical structure:
+     *
+     *   struct_field {
+     *       string_field1
+     *   }
+     *
+     * The order of the fields in the read session schema is derived from the
+     * table schema and does not correspond to the order in which the fields are
+     * specified in this list.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + public static final int ROW_RESTRICTION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object rowRestriction_ = ""; + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in
+     * a SQL query. Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + @java.lang.Override + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } + } + + /** + * + * + *
+     * Optional. SQL text filtering statement, similar to a WHERE clause in
+     * a SQL query. Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < selectedFields_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, selectedFields_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(rowRestriction_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, rowRestriction_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < selectedFields_.size(); i++) { + dataSize += computeStringSizeNoTag(selectedFields_.getRaw(i)); + } + size += dataSize; + size += 1 * getSelectedFieldsList().size(); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(rowRestriction_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, rowRestriction_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions other = + (com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions) obj; + + if (!getSelectedFieldsList().equals(other.getSelectedFieldsList())) return false; + if (!getRowRestriction().equals(other.getRowRestriction())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSelectedFieldsCount() > 0) { + hash = (37 * hash) + SELECTED_FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getSelectedFieldsList().hashCode(); + } + hash = (37 * hash) + ROW_RESTRICTION_FIELD_NUMBER; + hash = (53 * hash) + getRowRestriction().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Options dictating how we read a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.TableReadOptions} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.TableReadOptions) + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.ReadOptions + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.ReadOptions + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + rowRestriction_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.ReadOptions + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions build() { + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions result = + new com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + selectedFields_.makeImmutable(); + result.selectedFields_ = selectedFields_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowRestriction_ = rowRestriction_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + 
@java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance()) return this; + if (!other.selectedFields_.isEmpty()) { + if (selectedFields_.isEmpty()) { + selectedFields_ = other.selectedFields_; + bitField0_ |= 0x00000001; + } else { + ensureSelectedFieldsIsMutable(); + selectedFields_.addAll(other.selectedFields_); + } + onChanged(); + } + if (!other.getRowRestriction().isEmpty()) { + rowRestriction_ = other.rowRestriction_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(s); + break; + } // case 10 + case 18: + { + rowRestriction_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList selectedFields_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSelectedFieldsIsMutable() { + if (!selectedFields_.isModifiable()) { + selectedFields_ = new com.google.protobuf.LazyStringArrayList(selectedFields_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + selectedFields_.makeImmutable(); + return selectedFields_; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index to set the value at. + * @param value The selectedFields to set. + * @return This builder for chaining. + */ + public Builder setSelectedFields(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFields(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param values The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addAllSelectedFields(java.lang.Iterable values) { + ensureSelectedFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, selectedFields_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return This builder for chaining. + */ + public Builder clearSelectedFields() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. The names of the fields in the table to be returned. If no
+       * field names are specified, then all fields in the table are returned.
+       *
+       * Nested fields -- the child elements of a STRUCT field -- can be selected
+       * individually using their fully-qualified names, and will be returned as
+       * record fields containing only the selected nested fields. If a STRUCT
+       * field is specified in the selected fields list, all of the child elements
+       * will be returned.
+       *
+       * As an example, consider a table with the following schema:
+       *
+       *   {
+       *       "name": "struct_field",
+       *       "type": "RECORD",
+       *       "mode": "NULLABLE",
+       *       "fields": [
+       *           {
+       *               "name": "string_field1",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           },
+       *           {
+       *               "name": "string_field2",
+       *               "type": "STRING",
+       *               "mode": "NULLABLE"
+       *           }
+       *       ]
+       *   }
+       *
+       * Specifying "struct_field" in the selected fields list will result in a
+       * read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *       string_field2
+       *   }
+       *
+       * Specifying "struct_field.string_field1" in the selected fields list will
+       * result in a read session schema with the following logical structure:
+       *
+       *   struct_field {
+       *       string_field1
+       *   }
+       *
+       * The order of the fields in the read session schema is derived from the
+       * table schema and does not correspond to the order in which the fields are
+       * specified in this list.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The bytes of the selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object rowRestriction_ = ""; + + /** + * + * + *
+       * Optional. SQL text filtering statement, similar to a WHERE clause in
+       * a SQL query. Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Optional. SQL text filtering statement, similar to a WHERE clause in
+       * a SQL query. Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Optional. SQL text filtering statement, similar to a WHERE clause in
+       * a SQL query. Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
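+       *
+       * A minimal usage sketch (the predicate is illustrative and reuses the
+       * example fields above; setRowRestriction is the builder method defined
+       * in this class):
+       *
+       *   TableReadOptions options =
+       *       TableReadOptions.newBuilder()
+       *           .setRowRestriction("int_field > 5 AND nullable_field IS NOT NULL")
+       *           .build();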
+ * + * string row_restriction = 2; + * + * @param value The rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestriction(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + rowRestriction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. SQL text filtering statement, similar to a WHERE clause in
+       * a SQL query. Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return This builder for chaining. + */ + public Builder clearRowRestriction() { + rowRestriction_ = getDefaultInstance().getRowRestriction(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. SQL text filtering statement, similar to a WHERE clause in
+       * a SQL query. Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @param value The bytes for rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestrictionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + rowRestriction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.TableReadOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.TableReadOptions) + private static final com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableReadOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n8google/cloud/bigquery/storage/v1beta1/" + + "read_options.proto\022%google.cloud.bigquer" + + "y.storage.v1beta1\"D\n\020TableReadOptions\022\027\n" + + "\017selected_fields\030\001 \003(\t\022\027\n\017row_restrictio" + + "n\030\002 \001(\tBp\n)com.google.cloud.bigquery.sto" + + "rage.v1beta1ZCcloud.google.com/go/bigque" + + "ry/storage/apiv1beta1/storagepb;storagep" + + 
"bb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_TableReadOptions_descriptor, + new java.lang.String[] { + "SelectedFields", "RowRestriction", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java new file mode 100644 index 000000000000..b7aef33c9223 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java @@ -0,0 +1,19133 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta1/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta1; + +public final class Storage { + private Storage() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + /** + * + * + *
+   * Data format for input or output data.
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta1.DataFormat} + */ + public enum DataFormat implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Data format is unspecified.
+     * 
+ * + * DATA_FORMAT_UNSPECIFIED = 0; + */ + DATA_FORMAT_UNSPECIFIED(0), + /** + * + * + *
+     * Avro is a standard open source row-based file format.
+     * See https://avro.apache.org/ for more details.
+     * 
+ * + * AVRO = 1; + */ + AVRO(1), + /** + * + * + *
+     * Arrow is a standard open source column-based message format.
+     * See https://arrow.apache.org/ for more details.
+     * 
+ * + * ARROW = 3; + */ + ARROW(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Data format is unspecified.
+     * 
+ * + * DATA_FORMAT_UNSPECIFIED = 0; + */ + public static final int DATA_FORMAT_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Avro is a standard open source row-based file format.
+     * See https://avro.apache.org/ for more details.
+     * 
+ * + * AVRO = 1; + */ + public static final int AVRO_VALUE = 1; + + /** + * + * + *
+     * Arrow is a standard open source column-based message format.
+     * See https://arrow.apache.org/ for more details.
+     * 
+ * + * ARROW = 3; + */ + public static final int ARROW_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DataFormat valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static DataFormat forNumber(int value) { + switch (value) { + case 0: + return DATA_FORMAT_UNSPECIFIED; + case 1: + return AVRO; + case 3: + return ARROW; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DataFormat findValueByNumber(int number) { + return DataFormat.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final DataFormat[] VALUES = values(); + + public static DataFormat valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DataFormat(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta1.DataFormat) + } + + /** + * + * + *
+   * Strategy for distributing data among multiple streams in a read session.
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta1.ShardingStrategy} + */ + public enum ShardingStrategy implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Same as LIQUID.
+     * 
+ * + * SHARDING_STRATEGY_UNSPECIFIED = 0; + */ + SHARDING_STRATEGY_UNSPECIFIED(0), + /** + * + * + *
+     * Assigns data to each stream based on the client's read rate. The faster the
+     * client reads from a stream, the more data is assigned to the stream. In
+     * this strategy, it's possible to read all data from a single stream even if
+     * there are other streams present.
+     * 
+ * + * LIQUID = 1; + */ + LIQUID(1), + /** + * + * + *
+     * Assigns data to each stream such that roughly the same number of rows can
+     * be read from each stream. Because the server-side unit for assigning data
+     * is collections of rows, the API does not guarantee that each stream will
+     * return the same number of rows. Additionally, the limits are enforced based
+     * on the number of pre-filtering rows, so some filters can lead to lopsided
+     * assignments.
+     * 
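+     *
+     * A hedged sketch of requesting this strategy; the
+     * CreateReadSessionRequest builder shown here is assumed from the
+     * corresponding request message, which is outside this excerpt:
+     *
+     *   CreateReadSessionRequest request =
+     *       CreateReadSessionRequest.newBuilder()
+     *           .setShardingStrategy(ShardingStrategy.BALANCED)
+     *           .build();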
+ * + * BALANCED = 2; + */ + BALANCED(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Same as LIQUID.
+     * 
+ * + * SHARDING_STRATEGY_UNSPECIFIED = 0; + */ + public static final int SHARDING_STRATEGY_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Assigns data to each stream based on the client's read rate. The faster the
+     * client reads from a stream, the more data is assigned to the stream. In
+     * this strategy, it's possible to read all data from a single stream even if
+     * there are other streams present.
+     * 
+ * + * LIQUID = 1; + */ + public static final int LIQUID_VALUE = 1; + + /** + * + * + *
+     * Assigns data to each stream such that roughly the same number of rows can
+     * be read from each stream. Because the server-side unit for assigning data
+     * is collections of rows, the API does not guarantee that each stream will
+     * return the same number of rows. Additionally, the limits are enforced based
+     * on the number of pre-filtering rows, so some filters can lead to lopsided
+     * assignments.
+     * 
+ * + * BALANCED = 2; + */ + public static final int BALANCED_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ShardingStrategy valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ShardingStrategy forNumber(int value) { + switch (value) { + case 0: + return SHARDING_STRATEGY_UNSPECIFIED; + case 1: + return LIQUID; + case 2: + return BALANCED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ShardingStrategy findValueByNumber(int number) { + return ShardingStrategy.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final ShardingStrategy[] VALUES = values(); + + public static ShardingStrategy valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ShardingStrategy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta1.ShardingStrategy) + } + + public interface StreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.Stream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+     * 
+ * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+     * Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+     * 
+ * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + } + + /** + * + * + *
+   * Information about a single data stream within a read session.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.Stream} + */ + public static final class Stream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.Stream) + StreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Stream.newBuilder() to construct. + private Stream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Stream() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Stream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Stream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Stream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+     * Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+     * 
+ * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+     * Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+     * 
+ * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.Stream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.Stream) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Information about a single data stream within a read session.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.Stream} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.Stream) + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Stream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Stream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Stream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.Stream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object 
value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.Stream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta1.Storage.Stream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream other) { + if (other == com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+       * Name of the stream, in the form
+       * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+       * 
+ * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Name of the stream, in the form
+       * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+       * 
+ * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Name of the stream, in the form
+       * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+       * 
+ * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Name of the stream, in the form
+       * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+       * 
+ * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Name of the stream, in the form
+       * `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+       * 
+ * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.Stream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.Stream) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.Stream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.Stream(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Stream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface StreamPositionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.StreamPosition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Identifier for a given Stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + * + * @return Whether the stream field is set. + */ + boolean hasStream(); + + /** + * + * + *
+     * Identifier for a given Stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + * + * @return The stream. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStream(); + + /** + * + * + *
+     * Identifier for a given Stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamOrBuilder(); + + /** + * + * + *
+     * Position in the stream.
+     * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + long getOffset(); + } + + /** + * + * + *
+   * Expresses a point within a given stream using an offset position.
+   * 
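+   *
+   * A minimal construction sketch (the stream name is illustrative; the
+   * setStream and setOffset builder methods follow the generated builder
+   * pattern for the fields declared below):
+   *
+   *   StreamPosition position =
+   *       StreamPosition.newBuilder()
+   *           .setStream(Stream.newBuilder().setName(
+   *               "projects/my-project/locations/us/streams/stream-1"))
+   *           .setOffset(100L)
+   *           .build();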
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.StreamPosition} + */ + public static final class StreamPosition extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.StreamPosition) + StreamPositionOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamPosition.newBuilder() to construct. + private StreamPosition(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamPosition() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamPosition(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder.class); + } + + private int bitField0_; + public static final int STREAM_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream stream_; + + /** + * + * + *
+     * Identifier for a given Stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + * + * @return Whether the stream field is set. + */ + @java.lang.Override + public boolean hasStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Identifier for a given Stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + * + * @return The stream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStream() { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } + + /** + * + * + *
+     * Identifier for a given Stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamOrBuilder() { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private long offset_ = 0L; + + /** + * + * + *
+     * Position in the stream.
+     * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getStream()); + } + if (offset_ != 0L) { + output.writeInt64(2, offset_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getStream()); + } + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, offset_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition) obj; + + if (hasStream() != other.hasStream()) return false; + if (hasStream()) { + if (!getStream().equals(other.getStream())) return false; + } + if (getOffset() != other.getOffset()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasStream()) { + hash = (37 * hash) + STREAM_FIELD_NUMBER; + hash = (53 * hash) + getStream().hashCode(); + } + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + byte[] data) 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Expresses a point within a given stream using an offset position.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.StreamPosition} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.StreamPosition) + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + stream_ = null; + if (streamBuilder_ != null) { + streamBuilder_.dispose(); + streamBuilder_ = null; + } + offset_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.stream_ = streamBuilder_ == null ? 
stream_ : streamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offset_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + .getDefaultInstance()) return this; + if (other.hasStream()) { + mergeStream(other.getStream()); + } + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + offset_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream stream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + streamBuilder_; + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + * + * @return Whether the stream field is set. + */ + public boolean hasStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + * + * @return The stream. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStream() { + if (streamBuilder_ == null) { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } else { + return streamBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + public Builder setStream(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stream_ = value; + } else { + streamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + public Builder setStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamBuilder_ == null) { + stream_ = builderForValue.build(); + } else { + streamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + public Builder mergeStream(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && stream_ != null + && stream_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.Stream + .getDefaultInstance()) { + getStreamBuilder().mergeFrom(value); + } else { + stream_ = value; + } + } else { + streamBuilder_.mergeFrom(value); + } + if (stream_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + public Builder clearStream() { + bitField0_ = (bitField0_ & ~0x00000001); + stream_ = null; + if (streamBuilder_ != null) { + streamBuilder_.dispose(); + streamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder getStreamBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getStreamOrBuilder() { + if (streamBuilder_ != null) { + return streamBuilder_.getMessageOrBuilder(); + } else { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } + } + + /** + * + * + *
+       * Identifier for a given Stream.
+       * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamFieldBuilder() { + if (streamBuilder_ == null) { + streamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + getStream(), getParentForChildren(), isClean()); + stream_ = null; + } + return streamBuilder_; + } + + private long offset_; + + /** + * + * + *
+       * Position in the stream.
+       * </pre>
+ * + * int64 offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + /** + * + * + *
+       * Position in the stream.
+       * </pre>
+ * + * int64 offset = 2; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + + offset_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Position in the stream.
+       * </pre>
+ * + * int64 offset = 2; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.StreamPosition) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.StreamPosition) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamPosition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ReadSessionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.ReadSession) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * </pre>
+ * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+     * Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * </pre>
+ * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+     * Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors.
+     * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
+     * Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors.
+     * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
+     * Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors.
+     * </pre>
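// Editorial note (illustrative only): one way a caller might consume expire_time,
// comparing it against the current wall clock; the use of java.time here is an
// assumption of this sketch, not part of the generated API, and nanos are ignored.
class ExpiryCheckSketch {
  static boolean isExpired(com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session) {
    if (!session.hasExpireTime()) {
      return false; // no expiry reported for this session
    }
    com.google.protobuf.Timestamp t = session.getExpireTime();
    return t.getSeconds() < java.time.Instant.now().getEpochSecond();
  }
}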
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
+     * Avro schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + + /** + * + * + *
+     * Avro schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema getAvroSchema(); + + /** + * + * + *
+     * Avro schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder + getAvroSchemaOrBuilder(); + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema getArrowSchema(); + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder(); + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + java.util.List getStreamsList(); + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStreams(int index); + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + int getStreamsCount(); + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + java.util.List + getStreamsOrBuilderList(); + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
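// Editorial note (illustrative only): pairs each stream of a session with an initial
// StreamPosition, using only the accessors declared in this file; where the positions
// are then sent (e.g. a read request) is outside the scope of this sketch.
class StreamFanoutSketch {
  static java.util.List<com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition>
      initialPositions(com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session) {
    java.util.List<com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition> out =
        new java.util.ArrayList<>();
    for (com.google.cloud.bigquery.storage.v1beta1.Storage.Stream s : session.getStreamsList()) {
      out.add(
          com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.newBuilder()
              .setStream(s)
              .build()); // offset left at its default value (0)
    }
    return out;
  }
}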
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamsOrBuilder( + int index); + + /** + * + * + *
+     * Table that this ReadSession is reading from.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + * + * @return Whether the tableReference field is set. + */ + boolean hasTableReference(); + + /** + * + * + *
+     * Table that this ReadSession is reading from.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + * + * @return The tableReference. + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getTableReference(); + + /** + * + * + *
+     * Table that this ReadSession is reading from.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder + getTableReferenceOrBuilder(); + + /** + * + * + *
+     * Any modifiers which are applied when reading from the specified table.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + * + * @return Whether the tableModifiers field is set. + */ + boolean hasTableModifiers(); + + /** + * + * + *
+     * Any modifiers which are applied when reading from the specified table.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + * + * @return The tableModifiers. + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getTableModifiers(); + + /** + * + * + *
+     * Any modifiers which are applied when reading from the specified table.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder + getTableModifiersOrBuilder(); + + /** + * + * + *
+     * The strategy to use for distributing data among the streams.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return The enum numeric value on the wire for shardingStrategy. + */ + int getShardingStrategyValue(); + + /** + * + * + *
+     * The strategy to use for distributing data among the streams.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return The shardingStrategy. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy getShardingStrategy(); + + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.SchemaCase getSchemaCase(); + } + + /** + * + * + *
+   * Information returned from a `CreateReadSession` request.
+   * </pre>
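// Editorial note (illustrative only): the schema oneof declared in this message can be
// inspected via SchemaCase; `session` is assumed to come from a CreateReadSession call.
class SchemaCaseSketch {
  static String describe(com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session) {
    switch (session.getSchemaCase()) {
      case AVRO_SCHEMA:
        return "avro: " + session.getAvroSchema();
      case ARROW_SCHEMA:
        return "arrow: " + session.getArrowSchema();
      case SCHEMA_NOT_SET:
      default:
        return "no schema set";
    }
  }
}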
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ReadSession} + */ + public static final class ReadSession extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.ReadSession) + ReadSessionOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadSession.newBuilder() to construct. + private ReadSession(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadSession() { + name_ = ""; + streams_ = java.util.Collections.emptyList(); + shardingStrategy_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadSession(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder.class); + } + + private int bitField0_; + private int schemaCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(5), + ARROW_SCHEMA(6), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 5: + return AVRO_SCHEMA; + case 6: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+     * Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * </pre>
+ * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+     * Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * </pre>
+ * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
+     * Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors.
+     * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors.
+     * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
+     * Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors.
+     * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 5; + + /** + * + * + *
+     * Avro schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 5; + } + + /** + * + * + *
+     * Avro schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema getAvroSchema() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + + /** + * + * + *
+     * Avro schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder + getAvroSchemaOrBuilder() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 6; + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 6; + } + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema getArrowSchema() { + if (schemaCase_ == 6) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.getDefaultInstance(); + } + + /** + * + * + *
+     * Arrow schema.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if (schemaCase_ == 6) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.getDefaultInstance(); + } + + public static final int STREAMS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List streams_; + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + @java.lang.Override + public java.util.List + getStreamsList() { + return streams_; + } + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamsOrBuilderList() { + return streams_; + } + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + @java.lang.Override + public int getStreamsCount() { + return streams_.size(); + } + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStreams(int index) { + return streams_.get(index); + } + + /** + * + * + *
+     * Streams associated with this session.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamsOrBuilder( + int index) { + return streams_.get(index); + } + + public static final int TABLE_REFERENCE_FIELD_NUMBER = 7; + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + tableReference_; + + /** + * + * + *
+     * Table that this ReadSession is reading from.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + * + * @return Whether the tableReference field is set. + */ + @java.lang.Override + public boolean hasTableReference() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Table that this ReadSession is reading from.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + * + * @return The tableReference. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getTableReference() { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } + + /** + * + * + *
+     * Table that this ReadSession is reading from.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder + getTableReferenceOrBuilder() { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } + + public static final int TABLE_MODIFIERS_FIELD_NUMBER = 8; + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + tableModifiers_; + + /** + * + * + *
+     * Any modifiers which are applied when reading from the specified table.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + * + * @return Whether the tableModifiers field is set. + */ + @java.lang.Override + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Any modifiers which are applied when reading from the specified table.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + * + * @return The tableModifiers. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getTableModifiers() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + + /** + * + * + *
+     * Any modifiers which are applied when reading from the specified table.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + + public static final int SHARDING_STRATEGY_FIELD_NUMBER = 9; + private int shardingStrategy_ = 0; + + /** + * + * + *
+     * The strategy to use for distributing data among the streams.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return The enum numeric value on the wire for shardingStrategy. + */ + @java.lang.Override + public int getShardingStrategyValue() { + return shardingStrategy_; + } + + /** + * + * + *
+     * The strategy to use for distributing data among the streams.
+     * </pre>
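// Editorial note (illustrative only): proto3 enum fields preserve unknown wire values;
// a client built against an older enum definition can still read the raw number, as
// sketched here with the accessors declared above.
class ShardingSketch {
  static int shardingNumber(com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session) {
    com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy s =
        session.getShardingStrategy();
    if (s == com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.UNRECOGNIZED) {
      return session.getShardingStrategyValue(); // raw wire value, for forward compatibility
    }
    return s.getNumber();
  }
}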
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return The shardingStrategy. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + getShardingStrategy() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy result = + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.forNumber( + shardingStrategy_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getExpireTime()); + } + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(4, streams_.get(i)); + } + if (schemaCase_ == 5) { + output.writeMessage( + 5, (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_); + } + if (schemaCase_ == 6) { + output.writeMessage( + 6, (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(7, getTableReference()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(8, getTableModifiers()); + } + if (shardingStrategy_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + .SHARDING_STRATEGY_UNSPECIFIED + .getNumber()) { + output.writeEnum(9, shardingStrategy_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getExpireTime()); + } + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, streams_.get(i)); + } + if (schemaCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_); + } + if (schemaCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 6, (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getTableReference()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getTableModifiers()); + } + if (shardingStrategy_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + .SHARDING_STRATEGY_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(9, shardingStrategy_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean 
equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession) obj; + + if (!getName().equals(other.getName())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (hasTableReference() != other.hasTableReference()) return false; + if (hasTableReference()) { + if (!getTableReference().equals(other.getTableReference())) return false; + } + if (hasTableModifiers() != other.hasTableModifiers()) return false; + if (hasTableModifiers()) { + if (!getTableModifiers().equals(other.getTableModifiers())) return false; + } + if (shardingStrategy_ != other.shardingStrategy_) return false; + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 5: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 6: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + if (hasTableReference()) { + hash = (37 * hash) + TABLE_REFERENCE_FIELD_NUMBER; + hash = (53 * hash) + getTableReference().hashCode(); + } + if (hasTableModifiers()) { + hash = (37 * hash) + TABLE_MODIFIERS_FIELD_NUMBER; + hash = (53 * hash) + getTableModifiers().hashCode(); + } + hash = (37 * hash) + SHARDING_STRATEGY_FIELD_NUMBER; + hash = (53 * hash) + shardingStrategy_; + switch (schemaCase_) { + case 5: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 6: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Information returned from a `CreateReadSession` request.
+     * </pre>
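// Editorial note (illustrative only): round-trips a ReadSession through its wire form
// using the parseFrom overload declared above; toByteArray is inherited from the
// protobuf MessageLite base class rather than declared in this file.
class RoundTripSketch {
  static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession copyOf(
      com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session)
      throws com.google.protobuf.InvalidProtocolBufferException {
    byte[] wire = session.toByteArray();
    return com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.parseFrom(wire);
  }
}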
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ReadSession} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.ReadSession) + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getExpireTimeFieldBuilder(); + getStreamsFieldBuilder(); + getTableReferenceFieldBuilder(); + getTableModifiersFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + if (avroSchemaBuilder_ != null) { + avroSchemaBuilder_.clear(); + } + if (arrowSchemaBuilder_ != null) { + arrowSchemaBuilder_.clear(); + } + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + } else { + streams_ = null; + streamsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + tableReference_ = null; + if (tableReferenceBuilder_ != null) { + tableReferenceBuilder_.dispose(); + tableReferenceBuilder_ = null; + } + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + shardingStrategy_ = 0; + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession result = + new 
com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession result) { + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.streams_ = streams_; + } else { + result.streams_ = streamsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.expireTime_ = + expireTimeBuilder_ == null ? expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.tableReference_ = + tableReferenceBuilder_ == null ? tableReference_ : tableReferenceBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.tableModifiers_ = + tableModifiersBuilder_ == null ? tableModifiers_ : tableModifiersBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.shardingStrategy_ = shardingStrategy_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession result) { + result.schemaCase_ = schemaCase_; + result.schema_ = this.schema_; + if (schemaCase_ == 5 && avroSchemaBuilder_ != null) { + result.schema_ = avroSchemaBuilder_.build(); + } + if (schemaCase_ == 6 && arrowSchemaBuilder_ != null) { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 
0x00000001; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000010); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + if (other.hasTableReference()) { + mergeTableReference(other.getTableReference()); + } + if (other.hasTableModifiers()) { + mergeTableModifiers(other.getTableModifiers()); + } + if (other.shardingStrategy_ != 0) { + setShardingStrategyValue(other.getShardingStrategyValue()); + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 34: + { + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.parser(), + extensionRegistry); + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(m); + } else { + streamsBuilder_.addMessage(m); + } + break; + } // case 34 + case 42: + { + input.readMessage(getAvroSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 5; + break; + } // case 42 + case 50: + { + input.readMessage(getArrowSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 6; + break; + } // case 50 + case 58: + { + input.readMessage( + getTableReferenceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 58 + case 66: + { + input.readMessage( + getTableModifiersFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 66 + case 72: + { + shardingStrategy_ = input.readEnum(); + bitField0_ |= 0x00000080; + break; + } // case 72 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int schemaCase_ = 
0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+       * Unique identifier for the session, in the form
+       * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+       * </pre>
+ * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Unique identifier for the session, in the form
+       * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+       * </pre>
+ * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Unique identifier for the session, in the form
+       * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+       * </pre>
+ * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Unique identifier for the session, in the form
+       * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+       * </pre>
+ * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * Unique identifier for the session, in the form
+       * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+       * </pre>
+ * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * </pre>
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * 
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000002); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * 
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * 
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
+       * Time at which the session becomes invalid. After this time, subsequent
+       * requests to read this Session will return errors.
+       * 
+ * + * .google.protobuf.Timestamp expire_time = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder> + avroSchemaBuilder_; + + /** + * + * + *
+       * Avro schema.
+       * 
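+       *
+       * Editor's sketch (illustrative): the Avro schema is carried as a JSON
+       * string, so with Apache Avro on the classpath (an assumption) it can
+       * be parsed from a hypothetical "session" like this:
+       *
+       *   org.apache.avro.Schema schema =
+       *       new org.apache.avro.Schema.Parser()
+       *           .parse(session.getAvroSchema().getSchema());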
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 5; + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance(); + } else { + if (schemaCase_ == 5) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + public Builder mergeAvroSchema( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 5 + && schema_ + != com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 5) { + avroSchemaBuilder_.mergeFrom(value); + } else { + avroSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder + getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder + getAvroSchemaOrBuilder() { + if ((schemaCase_ == 5) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Avro schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 5)) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 5; + onChanged(); + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + + /** + * + * + *
+       * Arrow schema.
+       * 
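+       *
+       * Editor's sketch (illustrative): the Arrow schema arrives as
+       * IPC-serialized bytes; decoding them requires the Arrow Java
+       * libraries, which this file does not depend on. Reading the raw
+       * bytes from a hypothetical "session":
+       *
+       *   com.google.protobuf.ByteString serialized =
+       *       session.getArrowSchema().getSerializedSchema();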
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 6; + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 6) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } else { + if (schemaCase_ == 6) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 6; + return this; + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder + builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 6; + return this; + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + public Builder mergeArrowSchema( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 6 + && schema_ + != com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 6) { + arrowSchemaBuilder_.mergeFrom(value); + } else { + arrowSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 6; + return this; + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 6) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 6) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder + getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if ((schemaCase_ == 6) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 6) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Arrow schema.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 6)) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 6; + onChanged(); + return arrowSchemaBuilder_; + } + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + streams_ = + new java.util.ArrayList( + streams_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + streamsBuilder_; + + /** + * + * + *
+       * Streams associated with this session.
+       * 
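+       *
+       * Editor's sketch (illustrative): each stream can be handed to its own
+       * reader; a simple fan-out over a hypothetical "session" looks like:
+       *
+       *   for (com.google.cloud.bigquery.storage.v1beta1.Storage.Stream stream
+       *       : session.getStreamsList()) {
+       *     // issue a ReadRows call for stream.getName()
+       *   }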
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public java.util.List + getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder setStreams( + int index, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder addStreams( + int index, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder addAllStreams( + java.lang.Iterable + values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder getStreamsBuilder( + int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamsOrBuilder( + int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance()); + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder addStreamsBuilder( + int index) { + return getStreamsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance()); + } + + /** + * + * + *
+       * Streams associated with this session.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 4; + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + streams_, ((bitField0_ & 0x00000010) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + tableReference_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder> + tableReferenceBuilder_; + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
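+       *
+       * Editor's sketch of populating this field (project, dataset, and
+       * table ids are placeholders):
+       *
+       *   builder.setTableReference(
+       *       com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto
+       *           .TableReference.newBuilder()
+       *           .setProjectId("my-project")
+       *           .setDatasetId("my_dataset")
+       *           .setTableId("my_table")
+       *           .build());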
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + * + * @return Whether the tableReference field is set. + */ + public boolean hasTableReference() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + * + * @return The tableReference. + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getTableReference() { + if (tableReferenceBuilder_ == null) { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } else { + return tableReferenceBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + public Builder setTableReference( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference value) { + if (tableReferenceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableReference_ = value; + } else { + tableReferenceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + public Builder setTableReference( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder + builderForValue) { + if (tableReferenceBuilder_ == null) { + tableReference_ = builderForValue.build(); + } else { + tableReferenceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + public Builder mergeTableReference( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference value) { + if (tableReferenceBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && tableReference_ != null + && tableReference_ + != com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance()) { + getTableReferenceBuilder().mergeFrom(value); + } else { + tableReference_ = value; + } + } else { + tableReferenceBuilder_.mergeFrom(value); + } + if (tableReference_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + public Builder clearTableReference() { + bitField0_ = (bitField0_ & ~0x00000020); + tableReference_ = null; + if (tableReferenceBuilder_ != null) { + tableReferenceBuilder_.dispose(); + tableReferenceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder + getTableReferenceBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getTableReferenceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder + getTableReferenceOrBuilder() { + if (tableReferenceBuilder_ != null) { + return tableReferenceBuilder_.getMessageOrBuilder(); + } else { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } + } + + /** + * + * + *
+       * Table that this ReadSession is reading from.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder> + getTableReferenceFieldBuilder() { + if (tableReferenceBuilder_ == null) { + tableReferenceBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .TableReferenceOrBuilder>( + getTableReference(), getParentForChildren(), isClean()); + tableReference_ = null; + } + return tableReferenceBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + tableModifiers_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder> + tableModifiersBuilder_; + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + * + * @return Whether the tableModifiers field is set. + */ + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + * + * @return The tableModifiers. + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getTableModifiers() { + if (tableModifiersBuilder_ == null) { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } else { + return tableModifiersBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableModifiers_ = value; + } else { + tableModifiersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder + builderForValue) { + if (tableModifiersBuilder_ == null) { + tableModifiers_ = builderForValue.build(); + } else { + tableModifiersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + public Builder mergeTableModifiers( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && tableModifiers_ != null + && tableModifiers_ + != com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance()) { + getTableModifiersBuilder().mergeFrom(value); + } else { + tableModifiers_ = value; + } + } else { + tableModifiersBuilder_.mergeFrom(value); + } + if (tableModifiers_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + public Builder clearTableModifiers() { + bitField0_ = (bitField0_ & ~0x00000040); + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder + getTableModifiersBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getTableModifiersFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + if (tableModifiersBuilder_ != null) { + return tableModifiersBuilder_.getMessageOrBuilder(); + } else { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + } + + /** + * + * + *
+       * Any modifiers which are applied when reading from the specified table.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 8; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder> + getTableModifiersFieldBuilder() { + if (tableModifiersBuilder_ == null) { + tableModifiersBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .TableModifiersOrBuilder>( + getTableModifiers(), getParentForChildren(), isClean()); + tableModifiers_ = null; + } + return tableModifiersBuilder_; + } + + private int shardingStrategy_ = 0; + + /** + * + * + *
+       * The strategy to use for distributing data among the streams.
+       * 
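+       *
+       * Editor's sketch (illustrative): when reading this field back, guard
+       * against enum values added after this client was generated:
+       *
+       *   com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy s =
+       *       builder.getShardingStrategy();
+       *   if (s == com.google.cloud.bigquery.storage.v1beta1.Storage
+       *           .ShardingStrategy.UNRECOGNIZED) {
+       *     int raw = builder.getShardingStrategyValue(); // raw wire value
+       *   }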
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return The enum numeric value on the wire for shardingStrategy. + */ + @java.lang.Override + public int getShardingStrategyValue() { + return shardingStrategy_; + } + + /** + * + * + *
+       * The strategy to use for distributing data among the streams.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @param value The enum numeric value on the wire for shardingStrategy to set. + * @return This builder for chaining. + */ + public Builder setShardingStrategyValue(int value) { + shardingStrategy_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+       * The strategy to use for distributing data among the streams.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return The shardingStrategy. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + getShardingStrategy() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy result = + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.forNumber( + shardingStrategy_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * The strategy to use for distributing data among the streams.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @param value The shardingStrategy to set. + * @return This builder for chaining. + */ + public Builder setShardingStrategy( + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + shardingStrategy_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * The strategy to use for distributing data among the streams.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 9; + * + * @return This builder for chaining. + */ + public Builder clearShardingStrategy() { + bitField0_ = (bitField0_ & ~0x00000080); + shardingStrategy_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.ReadSession) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.ReadSession) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadSession parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface CreateReadSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Reference to the table to read.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the tableReference field is set. + */ + boolean hasTableReference(); + + /** + * + * + *
+     * Required. Reference to the table to read.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The tableReference. + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getTableReference(); + + /** + * + * + *
+     * Required. Reference to the table to read.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder + getTableReferenceOrBuilder(); + + /** + * + * + *
+     * Required. String of the form `projects/{project_id}` indicating the
+     * project this ReadSession is associated with. This is the project that will
+     * be billed for usage.
+     * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+     * Required. String of the form `projects/{project_id}` indicating the
+     * project this ReadSession is associated with. This is the project that will
+     * be billed for usage.
+     * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+     * Any modifiers to the Table (e.g. snapshot timestamp).
+     * 
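+     *
+     * Editor's sketch (illustrative): a snapshot read at a fixed point in
+     * time could be requested like this (the epoch value is a placeholder):
+     *
+     *   com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto
+     *       .TableModifiers.newBuilder()
+     *       .setSnapshotTime(
+     *           com.google.protobuf.Timestamp.newBuilder().setSeconds(1546300800L).build())
+     *       .build();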
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + * + * @return Whether the tableModifiers field is set. + */ + boolean hasTableModifiers(); + + /** + * + * + *
+     * Any modifiers to the Table (e.g. snapshot timestamp).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + * + * @return The tableModifiers. + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getTableModifiers(); + + /** + * + * + *
+     * Any modifiers to the Table (e.g. snapshot timestamp).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder + getTableModifiersOrBuilder(); + + /** + * + * + *
+     * Initial number of streams. If unset or 0, the server will choose a
+     * number of streams that produces reasonable throughput. Must be
+     * non-negative. The number of streams may be lower than the requested
+     * number, depending on the amount of parallelism that is reasonable for
+     * the table and the maximum amount of parallelism allowed by the system.
+     *
+     * Streams must be read starting from offset 0.
+     * 
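+     *
+     * Editor's sketch (a heuristic illustration, not a recommendation from
+     * this API): a caller might size the request to its worker pool, e.g.
+     *
+     *   requestBuilder.setRequestedStreams(
+     *       Math.max(1, Runtime.getRuntime().availableProcessors()));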
+ * + * int32 requested_streams = 3; + * + * @return The requestedStreams. + */ + int getRequestedStreams(); + + /** + * + * + *
+     * Read options for this session (e.g. column selection, filters).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + * + * @return Whether the readOptions field is set. + */ + boolean hasReadOptions(); + + /** + * + * + *
+     * Read options for this session (e.g. column selection, filters).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + * + * @return The readOptions. + */ + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions getReadOptions(); + + /** + * + * + *
+     * Read options for this session (e.g. column selection, filters).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder + getReadOptionsOrBuilder(); + + /** + * + * + *
+     * Data output format. Currently defaults to Avro.
+     * DATA_FORMAT_UNSPECIFIED is not supported.
+     * 
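+     *
+     * Editor's sketch: because DATA_FORMAT_UNSPECIFIED is rejected, set the
+     * format explicitly, e.g.
+     *
+     *   requestBuilder.setFormat(
+     *       com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.AVRO);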
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return The enum numeric value on the wire for format. + */ + int getFormatValue(); + + /** + * + * + *
+     * Data output format. Currently defaults to Avro.
+     * DATA_FORMAT_UNSPECIFIED is not supported.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return The format. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat getFormat(); + + /** + * + * + *
+     * The strategy to use for distributing data among multiple streams. Currently
+     * defaults to liquid sharding.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return The enum numeric value on the wire for shardingStrategy. + */ + int getShardingStrategyValue(); + + /** + * + * + *
+     * The strategy to use for distributing data among multiple streams. Currently
+     * defaults to liquid sharding.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return The shardingStrategy. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy getShardingStrategy(); + } + + /** + * + * + *
+   * Creates a new read session, which may include additional options such as
+   * requested parallelism, projection filters, and constraints.
+   * 
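+   *
+   * Editor's sketch of a fully populated request (all identifiers are
+   * placeholders; illustrative only, not generated code):
+   *
+   *   com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest
+   *       request =
+   *           com.google.cloud.bigquery.storage.v1beta1.Storage
+   *               .CreateReadSessionRequest.newBuilder()
+   *               .setParent("projects/my-project")
+   *               .setTableReference(
+   *                   com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto
+   *                       .TableReference.newBuilder()
+   *                       .setProjectId("my-project")
+   *                       .setDatasetId("my_dataset")
+   *                       .setTableId("my_table")
+   *                       .build())
+   *               .setRequestedStreams(4)
+   *               .setFormat(com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.AVRO)
+   *               .build();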
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest} + */ + public static final class CreateReadSessionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest) + CreateReadSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateReadSessionRequest.newBuilder() to construct. + private CreateReadSessionRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateReadSessionRequest() { + parent_ = ""; + format_ = 0; + shardingStrategy_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateReadSessionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest.Builder + .class); + } + + private int bitField0_; + public static final int TABLE_REFERENCE_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + tableReference_; + + /** + * + * + *
+     * Required. Reference to the table to read.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the tableReference field is set. + */ + @java.lang.Override + public boolean hasTableReference() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Reference to the table to read.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The tableReference. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getTableReference() { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } + + /** + * + * + *
+     * Required. Reference to the table to read.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder + getTableReferenceOrBuilder() { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } + + public static final int PARENT_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. String of the form `projects/{project_id}` indicating the
+     * project this ReadSession is associated with. This is the project that will
+     * be billed for usage.
+     * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+     * Required. String of the form `projects/{project_id}` indicating the
+     * project this ReadSession is associated with. This is the project that will
+     * be billed for usage.
+     * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TABLE_MODIFIERS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + tableModifiers_; + + /** + * + * + *
+     * Any modifiers to the Table (e.g. snapshot timestamp).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + * + * @return Whether the tableModifiers field is set. + */ + @java.lang.Override + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Any modifiers to the Table (e.g. snapshot timestamp).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + * + * @return The tableModifiers. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getTableModifiers() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + + /** + * + * + *
+     * Any modifiers to the Table (e.g. snapshot timestamp).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + + public static final int REQUESTED_STREAMS_FIELD_NUMBER = 3; + private int requestedStreams_ = 0; + + /** + * + * + *
+     * Initial number of streams. If unset or 0, the server will choose a
+     * number of streams that produces reasonable throughput. Must be
+     * non-negative. The number of streams may be lower than the requested
+     * number, depending on the amount of parallelism that is reasonable for
+     * the table and the maximum amount of parallelism allowed by the system.
+     *
+     * Streams must be read starting from offset 0.
+     * 
+ * + * int32 requested_streams = 3; + * + * @return The requestedStreams. + */ + @java.lang.Override + public int getRequestedStreams() { + return requestedStreams_; + } + + public static final int READ_OPTIONS_FIELD_NUMBER = 4; + private com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions readOptions_; + + /** + * + * + *
+     * Read options for this session (e.g. column selection, filters).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + * + * @return Whether the readOptions field is set. + */ + @java.lang.Override + public boolean hasReadOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Read options for this session (e.g. column selection, filters).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + * + * @return The readOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions getReadOptions() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + + /** + * + * + *
+     * Read options for this session (e.g. column selection, filters).
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + + public static final int FORMAT_FIELD_NUMBER = 5; + private int format_ = 0; + + /** + * + * + *
+     * Data output format. Currently defaults to Avro.
+     * DATA_FORMAT_UNSPECIFIED is not supported.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return The enum numeric value on the wire for format. + */ + @java.lang.Override + public int getFormatValue() { + return format_; + } + + /** + * + * + *
+     * Data output format. Currently defaults to Avro.
+     * DATA_FORMAT_UNSPECIFIED is not supported.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return The format. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat getFormat() { + com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat result = + com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.forNumber(format_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.UNRECOGNIZED + : result; + } + + public static final int SHARDING_STRATEGY_FIELD_NUMBER = 7; + private int shardingStrategy_ = 0; + + /** + * + * + *
+     * The strategy to use for distributing data among multiple streams. Currently
+     * defaults to liquid sharding.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return The enum numeric value on the wire for shardingStrategy. + */ + @java.lang.Override + public int getShardingStrategyValue() { + return shardingStrategy_; + } + + /** + * + * + *
+     * The strategy to use for distributing data among multiple streams. Currently
+     * defaults to liquid sharding.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return The shardingStrategy. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + getShardingStrategy() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy result = + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.forNumber( + shardingStrategy_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getTableReference()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getTableModifiers()); + } + if (requestedStreams_ != 0) { + output.writeInt32(3, requestedStreams_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getReadOptions()); + } + if (format_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.DATA_FORMAT_UNSPECIFIED + .getNumber()) { + output.writeEnum(5, format_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, parent_); + } + if (shardingStrategy_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + .SHARDING_STRATEGY_UNSPECIFIED + .getNumber()) { + output.writeEnum(7, shardingStrategy_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getTableReference()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTableModifiers()); + } + if (requestedStreams_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, requestedStreams_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getReadOptions()); + } + if (format_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.DATA_FORMAT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(5, format_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, parent_); + } + if (shardingStrategy_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + .SHARDING_STRATEGY_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(7, shardingStrategy_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest other 
= + (com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest) obj; + + if (hasTableReference() != other.hasTableReference()) return false; + if (hasTableReference()) { + if (!getTableReference().equals(other.getTableReference())) return false; + } + if (!getParent().equals(other.getParent())) return false; + if (hasTableModifiers() != other.hasTableModifiers()) return false; + if (hasTableModifiers()) { + if (!getTableModifiers().equals(other.getTableModifiers())) return false; + } + if (getRequestedStreams() != other.getRequestedStreams()) return false; + if (hasReadOptions() != other.hasReadOptions()) return false; + if (hasReadOptions()) { + if (!getReadOptions().equals(other.getReadOptions())) return false; + } + if (format_ != other.format_) return false; + if (shardingStrategy_ != other.shardingStrategy_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTableReference()) { + hash = (37 * hash) + TABLE_REFERENCE_FIELD_NUMBER; + hash = (53 * hash) + getTableReference().hashCode(); + } + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasTableModifiers()) { + hash = (37 * hash) + TABLE_MODIFIERS_FIELD_NUMBER; + hash = (53 * hash) + getTableModifiers().hashCode(); + } + hash = (37 * hash) + REQUESTED_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getRequestedStreams(); + if (hasReadOptions()) { + hash = (37 * hash) + READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReadOptions().hashCode(); + } + hash = (37 * hash) + FORMAT_FIELD_NUMBER; + hash = (53 * hash) + format_; + hash = (37 * hash) + SHARDING_STRATEGY_FIELD_NUMBER; + hash = (53 * hash) + shardingStrategy_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Creates a new read session, which may include additional options such as
+     * requested parallelism, projection filters and constraints.
+     * 
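+     *
+     * A minimal usage sketch with hypothetical names; the setters are the
+     * generated builder methods defined in this class:
+     * <pre>{@code
+     * Storage.CreateReadSessionRequest request =
+     *     Storage.CreateReadSessionRequest.newBuilder()
+     *         .setParent("projects/my-project")
+     *         .setTableReference(
+     *             TableReferenceProto.TableReference.newBuilder()
+     *                 .setProjectId("my-project")
+     *                 .setDatasetId("my_dataset")
+     *                 .setTableId("my_table"))
+     *         .setRequestedStreams(4)
+     *         .setFormat(Storage.DataFormat.AVRO)
+     *         .build();
+     * }</pre>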
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest) + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getTableReferenceFieldBuilder(); + getTableModifiersFieldBuilder(); + getReadOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + tableReference_ = null; + if (tableReferenceBuilder_ != null) { + tableReferenceBuilder_.dispose(); + tableReferenceBuilder_ = null; + } + parent_ = ""; + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + requestedStreams_ = 0; + readOptions_ = null; + if (readOptionsBuilder_ != null) { + readOptionsBuilder_.dispose(); + readOptionsBuilder_ = null; + } + format_ = 0; + shardingStrategy_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void 
buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.tableReference_ = + tableReferenceBuilder_ == null ? tableReference_ : tableReferenceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.tableModifiers_ = + tableModifiersBuilder_ == null ? tableModifiers_ : tableModifiersBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.requestedStreams_ = requestedStreams_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.readOptions_ = + readOptionsBuilder_ == null ? readOptions_ : readOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.format_ = format_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.shardingStrategy_ = shardingStrategy_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + .getDefaultInstance()) return this; + if (other.hasTableReference()) { + mergeTableReference(other.getTableReference()); + } + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasTableModifiers()) { + mergeTableModifiers(other.getTableModifiers()); + } + if (other.getRequestedStreams() != 0) { + setRequestedStreams(other.getRequestedStreams()); + } + if (other.hasReadOptions()) { + mergeReadOptions(other.getReadOptions()); + } + if (other.format_ != 0) { + setFormatValue(other.getFormatValue()); + } + if (other.shardingStrategy_ != 0) { + setShardingStrategyValue(other.getShardingStrategyValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getTableReferenceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + getTableModifiersFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 18 + case 24: + { + requestedStreams_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 24 + case 34: + { + input.readMessage(getReadOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 34 + case 40: + { + format_ = input.readEnum(); + bitField0_ |= 0x00000020; + break; + } // case 40 + case 50: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 50 + case 56: + { + shardingStrategy_ = input.readEnum(); + bitField0_ |= 0x00000040; + break; + } // case 56 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + tableReference_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder> + tableReferenceBuilder_; + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the tableReference field is set. + */ + public boolean hasTableReference() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The tableReference. + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getTableReference() { + if (tableReferenceBuilder_ == null) { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } else { + return tableReferenceBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
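+       *
+       * A minimal sketch with hypothetical identifiers:
+       * <pre>{@code
+       * builder.setTableReference(
+       *     TableReferenceProto.TableReference.newBuilder()
+       *         .setProjectId("my-project")   // hypothetical
+       *         .setDatasetId("my_dataset")   // hypothetical
+       *         .setTableId("my_table")       // hypothetical
+       *         .build());
+       * }</pre>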
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setTableReference( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference value) { + if (tableReferenceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableReference_ = value; + } else { + tableReferenceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setTableReference( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder + builderForValue) { + if (tableReferenceBuilder_ == null) { + tableReference_ = builderForValue.build(); + } else { + tableReferenceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeTableReference( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference value) { + if (tableReferenceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && tableReference_ != null + && tableReference_ + != com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance()) { + getTableReferenceBuilder().mergeFrom(value); + } else { + tableReference_ = value; + } + } else { + tableReferenceBuilder_.mergeFrom(value); + } + if (tableReference_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearTableReference() { + bitField0_ = (bitField0_ & ~0x00000001); + tableReference_ = null; + if (tableReferenceBuilder_ != null) { + tableReferenceBuilder_.dispose(); + tableReferenceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder + getTableReferenceBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableReferenceFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder + getTableReferenceOrBuilder() { + if (tableReferenceBuilder_ != null) { + return tableReferenceBuilder_.getMessageOrBuilder(); + } else { + return tableReference_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance() + : tableReference_; + } + } + + /** + * + * + *
+       * Required. Reference to the table to read.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.TableReference table_reference = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder> + getTableReferenceFieldBuilder() { + if (tableReferenceBuilder_ == null) { + tableReferenceBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .TableReferenceOrBuilder>( + getTableReference(), getParentForChildren(), isClean()); + tableReference_ = null; + } + return tableReferenceBuilder_; + } + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+       * Required. String of the form `projects/{project_id}` indicating the
+       * project this ReadSession is associated with. This is the project that will
+       * be billed for usage.
+       * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * Required. String of the form `projects/{project_id}` indicating the
+       * project this ReadSession is associated with. This is the project that will
+       * be billed for usage.
+       * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * Required. String of the form `projects/{project_id}` indicating the
+       * project this ReadSession is associated with. This is the project that will
+       * be billed for usage.
+       * 
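+       *
+       * A sketch with a hypothetical project id:
+       * <pre>{@code
+       * builder.setParent("projects/my-project");
+       * }</pre>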
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. String of the form `projects/{project_id}` indicating the
+       * project this ReadSession is associated with. This is the project that will
+       * be billed for usage.
+       * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. String of the form `projects/{project_id}` indicating the
+       * project this ReadSession is associated with. This is the project that will
+       * be billed for usage.
+       * 
+ * + * + * string parent = 6 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + tableModifiers_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder> + tableModifiersBuilder_; + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + * + * @return Whether the tableModifiers field is set. + */ + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + * + * @return The tableModifiers. + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getTableModifiers() { + if (tableModifiersBuilder_ == null) { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } else { + return tableModifiersBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
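+       *
+       * A sketch, assuming TableModifiers carries a snapshot_time timestamp:
+       * <pre>{@code
+       * builder.setTableModifiers(
+       *     TableReferenceProto.TableModifiers.newBuilder()
+       *         .setSnapshotTime(
+       *             com.google.protobuf.Timestamp.newBuilder().setSeconds(1700000000))
+       *         .build());
+       * }</pre>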
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableModifiers_ = value; + } else { + tableModifiersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder + builderForValue) { + if (tableModifiersBuilder_ == null) { + tableModifiers_ = builderForValue.build(); + } else { + tableModifiersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + public Builder mergeTableModifiers( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && tableModifiers_ != null + && tableModifiers_ + != com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance()) { + getTableModifiersBuilder().mergeFrom(value); + } else { + tableModifiers_ = value; + } + } else { + tableModifiersBuilder_.mergeFrom(value); + } + if (tableModifiers_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + public Builder clearTableModifiers() { + bitField0_ = (bitField0_ & ~0x00000004); + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder + getTableModifiersBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getTableModifiersFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + if (tableModifiersBuilder_ != null) { + return tableModifiersBuilder_.getMessageOrBuilder(); + } else { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + } + + /** + * + * + *
+       * Any modifiers to the Table (e.g. snapshot timestamp).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableModifiers table_modifiers = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder> + getTableModifiersFieldBuilder() { + if (tableModifiersBuilder_ == null) { + tableModifiersBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .Builder, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .TableModifiersOrBuilder>( + getTableModifiers(), getParentForChildren(), isClean()); + tableModifiers_ = null; + } + return tableModifiersBuilder_; + } + + private int requestedStreams_; + + /** + * + * + *
+       * Initial number of streams. If unset or 0, we will
+       * provide a number of streams that produces reasonable throughput. Must be
+       * non-negative. The number of streams may be lower than the requested number,
+       * depending on the amount of parallelism that is reasonable for the table and
+       * the maximum amount of parallelism allowed by the system.
+       *
+       * Streams must be read starting from offset 0.
+       * 
+ * + * int32 requested_streams = 3; + * + * @return The requestedStreams. + */ + @java.lang.Override + public int getRequestedStreams() { + return requestedStreams_; + } + + /** + * + * + *
+       * Initial number of streams. If unset or 0, we will
+       * provide a number of streams that produces reasonable throughput. Must be
+       * non-negative. The number of streams may be lower than the requested number,
+       * depending on the amount of parallelism that is reasonable for the table and
+       * the maximum amount of parallelism allowed by the system.
+       *
+       * Streams must be read starting from offset 0.
+       * 
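+       *
+       * A sketch; the server may still return fewer streams than requested:
+       * <pre>{@code
+       * builder.setRequestedStreams(4); // ask for up to four parallel read streams
+       * }</pre>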
+ * + * int32 requested_streams = 3; + * + * @param value The requestedStreams to set. + * @return This builder for chaining. + */ + public Builder setRequestedStreams(int value) { + + requestedStreams_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Initial number of streams. If unset or 0, we will
+       * provide a number of streams that produces reasonable throughput. Must be
+       * non-negative. The number of streams may be lower than the requested number,
+       * depending on the amount of parallelism that is reasonable for the table and
+       * the maximum amount of parallelism allowed by the system.
+       *
+       * Streams must be read starting from offset 0.
+       * 
+ * + * int32 requested_streams = 3; + * + * @return This builder for chaining. + */ + public Builder clearRequestedStreams() { + bitField0_ = (bitField0_ & ~0x00000008); + requestedStreams_ = 0; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions readOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder> + readOptionsBuilder_; + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + * + * @return Whether the readOptions field is set. + */ + public boolean hasReadOptions() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + * + * @return The readOptions. + */ + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + getReadOptions() { + if (readOptionsBuilder_ == null) { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance() + : readOptions_; + } else { + return readOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
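+       *
+       * A sketch, assuming TableReadOptions exposes selected_fields and
+       * row_restriction as in this v1beta1 API:
+       * <pre>{@code
+       * builder.setReadOptions(
+       *     ReadOptions.TableReadOptions.newBuilder()
+       *         .addSelectedFields("name")          // hypothetical column
+       *         .setRowRestriction("age > 18")      // hypothetical filter
+       *         .build());
+       * }</pre>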
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readOptions_ = value; + } else { + readOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder + builderForValue) { + if (readOptionsBuilder_ == null) { + readOptions_ = builderForValue.build(); + } else { + readOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + public Builder mergeReadOptions( + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && readOptions_ != null + && readOptions_ + != com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance()) { + getReadOptionsBuilder().mergeFrom(value); + } else { + readOptions_ = value; + } + } else { + readOptionsBuilder_.mergeFrom(value); + } + if (readOptions_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + public Builder clearReadOptions() { + bitField0_ = (bitField0_ & ~0x00000010); + readOptions_ = null; + if (readOptionsBuilder_ != null) { + readOptionsBuilder_.dispose(); + readOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder + getReadOptionsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getReadOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + if (readOptionsBuilder_ != null) { + return readOptionsBuilder_.getMessageOrBuilder(); + } else { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + } + + /** + * + * + *
+       * Read options for this session (e.g. column selection, filters).
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.TableReadOptions read_options = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder> + getReadOptionsFieldBuilder() { + if (readOptionsBuilder_ == null) { + readOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptionsOrBuilder>( + getReadOptions(), getParentForChildren(), isClean()); + readOptions_ = null; + } + return readOptionsBuilder_; + } + + private int format_ = 0; + + /** + * + * + *
+       * Data output format. Currently defaults to Avro.
+       * DATA_FORMAT_UNSPECIFIED is not supported.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return The enum numeric value on the wire for format. + */ + @java.lang.Override + public int getFormatValue() { + return format_; + } + + /** + * + * + *
+       * Data output format. Currently defaults to Avro.
+       * DATA_FORMAT_UNSPECIFIED is not supported.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @param value The enum numeric value on the wire for format to set. + * @return This builder for chaining. + */ + public Builder setFormatValue(int value) { + format_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+       * Data output format. Currently defaults to Avro.
+       * DATA_FORMAT_UNSPECIFIED is not supported.
+       * 
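+       *
+       * A sketch of the numeric-vs-typed accessor pair; UNRECOGNIZED is returned
+       * for wire values this generated enum does not know:
+       * <pre>{@code
+       * int wireValue = builder.getFormatValue();        // raw proto enum number
+       * Storage.DataFormat typed = builder.getFormat();  // typed, or UNRECOGNIZED
+       * }</pre>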
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return The format. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat getFormat() { + com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat result = + com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.forNumber(format_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * Data output format. Currently defaults to Avro.
+       * DATA_FORMAT_UNSPECIFIED is not supported.
+       * 
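+       *
+       * A sketch; AVRO is a value this API's DataFormat enum defines:
+       * <pre>{@code
+       * builder.setFormat(Storage.DataFormat.AVRO);
+       * }</pre>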
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @param value The format to set. + * @return This builder for chaining. + */ + public Builder setFormat(com.google.cloud.bigquery.storage.v1beta1.Storage.DataFormat value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + format_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * Data output format. Currently defaults to Avro.
+       * DATA_FORMAT_UNSPECIFIED is not supported.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.DataFormat format = 5; + * + * @return This builder for chaining. + */ + public Builder clearFormat() { + bitField0_ = (bitField0_ & ~0x00000020); + format_ = 0; + onChanged(); + return this; + } + + private int shardingStrategy_ = 0; + + /** + * + * + *
+       * The strategy to use for distributing data among multiple streams. Currently
+       * defaults to liquid sharding.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return The enum numeric value on the wire for shardingStrategy. + */ + @java.lang.Override + public int getShardingStrategyValue() { + return shardingStrategy_; + } + + /** + * + * + *
+       * The strategy to use for distributing data among multiple streams. Currently
+       * defaults to liquid sharding.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @param value The enum numeric value on the wire for shardingStrategy to set. + * @return This builder for chaining. + */ + public Builder setShardingStrategyValue(int value) { + shardingStrategy_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+       * The strategy to use for distributing data among multiple streams. Currently
+       * defaults to liquid sharding.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return The shardingStrategy. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy + getShardingStrategy() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy result = + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.forNumber( + shardingStrategy_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy.UNRECOGNIZED + : result; + } + + /** + * + * + *
+       * The strategy to use for distributing data among multiple streams. Currently
+       * defaults to liquid sharding.
+       * 
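+       *
+       * A sketch; LIQUID is the strategy this file's docs describe as the default:
+       * <pre>{@code
+       * builder.setShardingStrategy(Storage.ShardingStrategy.LIQUID);
+       * }</pre>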
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @param value The shardingStrategy to set. + * @return This builder for chaining. + */ + public Builder setShardingStrategy( + com.google.cloud.bigquery.storage.v1beta1.Storage.ShardingStrategy value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + shardingStrategy_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+       * The strategy to use for distributing data among multiple streams. Currently
+       * defaults to liquid sharding.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ShardingStrategy sharding_strategy = 7; + * + * @return This builder for chaining. + */ + public Builder clearShardingStrategy() { + bitField0_ = (bitField0_ & ~0x00000040); + shardingStrategy_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateReadSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.CreateReadSessionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ReadRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.ReadRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Identifier of the position in the stream to start reading from.
+     * The offset requested must be less than the last row read from ReadRows.
+     * Requesting a larger offset is undefined.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readPosition field is set. + */ + boolean hasReadPosition(); + + /** + * + * + *
+     * Required. Identifier of the position in the stream to start reading from.
+     * The offset requested must be less than the last row read from ReadRows.
+     * Requesting a larger offset is undefined.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readPosition. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition getReadPosition(); + + /** + * + * + *
+     * Required. Identifier of the position in the stream to start reading from.
+     * The offset requested must be less than the last row read from ReadRows.
+     * Requesting a larger offset is undefined.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder + getReadPositionOrBuilder(); + } + + /** + * + * + *
+   * Requests for row data via `ReadRows` must provide Stream position information.
+   * 
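+   *
+   * A minimal sketch (hypothetical stream name; StreamPosition and Stream are
+   * generated types in this same Storage outer class):
+   * <pre>{@code
+   * Storage.ReadRowsRequest request =
+   *     Storage.ReadRowsRequest.newBuilder()
+   *         .setReadPosition(
+   *             Storage.StreamPosition.newBuilder()
+   *                 .setStream(Storage.Stream.newBuilder().setName("my-stream"))
+   *                 .setOffset(0))
+   *         .build();
+   * }</pre>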
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ReadRowsRequest} + */ + public static final class ReadRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.ReadRowsRequest) + ReadRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRowsRequest.newBuilder() to construct. + private ReadRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest.Builder.class); + } + + private int bitField0_; + public static final int READ_POSITION_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition readPosition_; + + /** + * + * + *
+     * Required. Identifier of the position in the stream to start reading from.
+     * The offset requested must be less than the last row read from ReadRows.
+     * Requesting a larger offset is undefined.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readPosition field is set. + */ + @java.lang.Override + public boolean hasReadPosition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Identifier of the position in the stream to start reading from.
+     * The offset requested must be less than the last row read from ReadRows.
+     * Requesting a larger offset is undefined.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readPosition. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition getReadPosition() { + return readPosition_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.getDefaultInstance() + : readPosition_; + } + + /** + * + * + *
+     * Required. Identifier of the position in the stream to start reading from.
+     * The offset requested must be less than the last row read from ReadRows.
+     * Requesting a larger offset is undefined.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder + getReadPositionOrBuilder() { + return readPosition_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.getDefaultInstance() + : readPosition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReadPosition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReadPosition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest) obj; + + if (hasReadPosition() != other.hasReadPosition()) return false; + if (hasReadPosition()) { + if (!getReadPosition().equals(other.getReadPosition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReadPosition()) { + hash = (37 * hash) + READ_POSITION_FIELD_NUMBER; + hash = (53 * hash) + getReadPosition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + 
byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Requests for row data via `ReadRows` must provide Stream position information.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ReadRowsRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.ReadRowsRequest) + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getReadPositionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readPosition_ = null; + if (readPositionBuilder_ != null) { + readPositionBuilder_.dispose(); + readPositionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readPosition_ = + readPositionBuilder_ == null ? 
readPosition_ : readPositionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + .getDefaultInstance()) return this; + if (other.hasReadPosition()) { + mergeReadPosition(other.getReadPosition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getReadPositionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition readPosition_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder> + readPositionBuilder_; + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
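+       *
+       * For example, if the last row read from ReadRows had offset 100, the
+       * offset requested here must be below 100; requesting 100 or more is
+       * undefined.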
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readPosition field is set. + */ + public boolean hasReadPosition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readPosition. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition getReadPosition() { + if (readPositionBuilder_ == null) { + return readPosition_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + .getDefaultInstance() + : readPosition_; + } else { + return readPositionBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadPosition( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition value) { + if (readPositionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readPosition_ = value; + } else { + readPositionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadPosition( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder + builderForValue) { + if (readPositionBuilder_ == null) { + readPosition_ = builderForValue.build(); + } else { + readPositionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReadPosition( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition value) { + if (readPositionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && readPosition_ != null + && readPosition_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + .getDefaultInstance()) { + getReadPositionBuilder().mergeFrom(value); + } else { + readPosition_ = value; + } + } else { + readPositionBuilder_.mergeFrom(value); + } + if (readPosition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReadPosition() { + bitField0_ = (bitField0_ & ~0x00000001); + readPosition_ = null; + if (readPositionBuilder_ != null) { + readPositionBuilder_.dispose(); + readPositionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder + getReadPositionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getReadPositionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder + getReadPositionOrBuilder() { + if (readPositionBuilder_ != null) { + return readPositionBuilder_.getMessageOrBuilder(); + } else { + return readPosition_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition + .getDefaultInstance() + : readPosition_; + } + } + + /** + * + * + *
+       * Required. Identifier of the position in the stream to start reading from.
+       * The offset requested must be less than the last row read from ReadRows.
+       * Requesting a larger offset is undefined.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.StreamPosition read_position = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder> + getReadPositionFieldBuilder() { + if (readPositionBuilder_ == null) { + readPositionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPositionOrBuilder>( + getReadPosition(), getParentForChildren(), isClean()); + readPosition_ = null; + } + return readPositionBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.ReadRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.ReadRowsRequest) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface StreamStatusOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.StreamStatus) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Estimated number of rows in the current stream. May change over time as
+     * different readers in the stream progress at relatively fast or slow
+     * rates.
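+     *
+     * For example, a stream might initially report an estimate of 1,000 rows
+     * and later revise it downward as other readers make progress.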
+     * 
+ * + * int64 estimated_row_count = 1; + * + * @return The estimatedRowCount. + */ + long getEstimatedRowCount(); + + /** + * + * + *
+     * A value in the range [0.0, 1.0] that represents the fraction of rows
+     * assigned to this stream that have been processed by the server. In the
+     * presence of read filters, the server may process more rows than it returns,
+     * so this value reflects progress through the pre-filtering rows.
+     *
+     * This value is only populated for sessions created through the BALANCED
+     * sharding strategy.
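+     *
+     * For example, a value of 0.25 means the server has processed roughly a
+     * quarter of the rows assigned to this stream, counting rows that a read
+     * filter may later exclude from the response.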
+     * 
+ * + * float fraction_consumed = 2; + * + * @return The fractionConsumed. + */ + float getFractionConsumed(); + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + * + * @return The progress. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress getProgress(); + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
+     * Whether this stream can be split. For sessions that use the LIQUID sharding
+     * strategy, this value is always false. For BALANCED sessions, this value is
+     * false when enough data have been read such that no more splits are possible
+     * at that point or beyond. For small tables or streams that are the result of
+     * a chain of splits, this value may never be true.
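+     *
+     * A hypothetical consumer-side sketch (SplitReadStream is assumed to be
+     * the companion split RPC):
+     *
+     *   if (status.getIsSplittable()) {
+     *     // the stream can still be split, e.g. via SplitReadStream
+     *   }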
+     * 
+ * + * bool is_splittable = 3; + * + * @return The isSplittable. + */ + boolean getIsSplittable(); + } + + /** + * + * + *
+   * Progress information for a given Stream.
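+   *
+   * An illustrative sketch of inspecting a status, assuming a StreamStatus
+   * obtained from a read response:
+   *
+   *   long estimatedRows = status.getEstimatedRowCount();
+   *   float consumed = status.getFractionConsumed();
+   *   boolean splittable = status.getIsSplittable();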
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.StreamStatus} + */ + public static final class StreamStatus extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.StreamStatus) + StreamStatusOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamStatus.newBuilder() to construct. + private StreamStatus(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamStatus() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamStatus(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder.class); + } + + private int bitField0_; + public static final int ESTIMATED_ROW_COUNT_FIELD_NUMBER = 1; + private long estimatedRowCount_ = 0L; + + /** + * + * + *
+     * Estimated number of rows in the current stream. May change over time as
+     * different readers in the stream progress at relatively fast or slow
+     * rates.
+     * 
+ * + * int64 estimated_row_count = 1; + * + * @return The estimatedRowCount. + */ + @java.lang.Override + public long getEstimatedRowCount() { + return estimatedRowCount_; + } + + public static final int FRACTION_CONSUMED_FIELD_NUMBER = 2; + private float fractionConsumed_ = 0F; + + /** + * + * + *
+     * A value in the range [0.0, 1.0] that represents the fraction of rows
+     * assigned to this stream that have been processed by the server. In the
+     * presence of read filters, the server may process more rows than it returns,
+     * so this value reflects progress through the pre-filtering rows.
+     *
+     * This value is only populated for sessions created through the BALANCED
+     * sharding strategy.
+     * 
+ * + * float fraction_consumed = 2; + * + * @return The fractionConsumed. + */ + @java.lang.Override + public float getFractionConsumed() { + return fractionConsumed_; + } + + public static final int PROGRESS_FIELD_NUMBER = 4; + private com.google.cloud.bigquery.storage.v1beta1.Storage.Progress progress_; + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + * + * @return The progress. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress getProgress() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder + getProgressOrBuilder() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.getDefaultInstance() + : progress_; + } + + public static final int IS_SPLITTABLE_FIELD_NUMBER = 3; + private boolean isSplittable_ = false; + + /** + * + * + *
+     * Whether this stream can be split. For sessions that use the LIQUID sharding
+     * strategy, this value is always false. For BALANCED sessions, this value is
+     * false when enough data have been read such that no more splits are possible
+     * at that point or beyond. For small tables or streams that are the result of
+     * a chain of splits, this value may never be true.
+     * 
+ * + * bool is_splittable = 3; + * + * @return The isSplittable. + */ + @java.lang.Override + public boolean getIsSplittable() { + return isSplittable_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (estimatedRowCount_ != 0L) { + output.writeInt64(1, estimatedRowCount_); + } + if (java.lang.Float.floatToRawIntBits(fractionConsumed_) != 0) { + output.writeFloat(2, fractionConsumed_); + } + if (isSplittable_ != false) { + output.writeBool(3, isSplittable_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getProgress()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (estimatedRowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, estimatedRowCount_); + } + if (java.lang.Float.floatToRawIntBits(fractionConsumed_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeFloatSize(2, fractionConsumed_); + } + if (isSplittable_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, isSplittable_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getProgress()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus) obj; + + if (getEstimatedRowCount() != other.getEstimatedRowCount()) return false; + if (java.lang.Float.floatToIntBits(getFractionConsumed()) + != java.lang.Float.floatToIntBits(other.getFractionConsumed())) return false; + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (getIsSplittable() != other.getIsSplittable()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ESTIMATED_ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedRowCount()); + hash = (37 * hash) + FRACTION_CONSUMED_FIELD_NUMBER; + hash = (53 * hash) + java.lang.Float.floatToIntBits(getFractionConsumed()); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + hash = (37 * hash) + IS_SPLITTABLE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsSplittable()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + 
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Progress information for a given Stream.
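+     *
+     * A minimal builder sketch (illustrative; the setters are defined below in
+     * this class):
+     *
+     *   StreamStatus status =
+     *       StreamStatus.newBuilder().setEstimatedRowCount(1000L).build();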
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.StreamStatus} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.StreamStatus) + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getProgressFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + estimatedRowCount_ = 0L; + fractionConsumed_ = 0F; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + isSplittable_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.estimatedRowCount_ = estimatedRowCount_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fractionConsumed_ = fractionConsumed_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.progress_ = progressBuilder_ == null ? 
progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.isSplittable_ = isSplittable_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.getDefaultInstance()) + return this; + if (other.getEstimatedRowCount() != 0L) { + setEstimatedRowCount(other.getEstimatedRowCount()); + } + if (other.getFractionConsumed() != 0F) { + setFractionConsumed(other.getFractionConsumed()); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.getIsSplittable() != false) { + setIsSplittable(other.getIsSplittable()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + estimatedRowCount_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 21: + { + fractionConsumed_ = input.readFloat(); + bitField0_ |= 0x00000002; + break; + } // case 21 + case 24: + { + isSplittable_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 24 + case 34: + { + input.readMessage(getProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long estimatedRowCount_; + + /** + * + * + *
+       * Estimated number of rows in the current stream. May change over time as
+       * different readers in the stream progress at relatively fast or slow
+       * rates.
+       * 
+ * + * int64 estimated_row_count = 1; + * + * @return The estimatedRowCount. + */ + @java.lang.Override + public long getEstimatedRowCount() { + return estimatedRowCount_; + } + + /** + * + * + *
+       * Estimated number of rows in the current stream. May change over time as
+       * different readers in the stream progress at relatively fast or slow
+       * rates.
+       * 
+ * + * int64 estimated_row_count = 1; + * + * @param value The estimatedRowCount to set. + * @return This builder for chaining. + */ + public Builder setEstimatedRowCount(long value) { + + estimatedRowCount_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Estimated number of rows in the current stream. May change over time as
+       * different readers in the stream progress at relatively fast or slow
+       * rates.
+       * 
+ * + * int64 estimated_row_count = 1; + * + * @return This builder for chaining. + */ + public Builder clearEstimatedRowCount() { + bitField0_ = (bitField0_ & ~0x00000001); + estimatedRowCount_ = 0L; + onChanged(); + return this; + } + + private float fractionConsumed_; + + /** + * + * + *
+       * A value in the range [0.0, 1.0] that represents the fraction of rows
+       * assigned to this stream that have been processed by the server. In the
+       * presence of read filters, the server may process more rows than it returns,
+       * so this value reflects progress through the pre-filtering rows.
+       *
+       * This value is only populated for sessions created through the BALANCED
+       * sharding strategy.
+       * 
+ * + * float fraction_consumed = 2; + * + * @return The fractionConsumed. + */ + @java.lang.Override + public float getFractionConsumed() { + return fractionConsumed_; + } + + /** + * + * + *
+       * A value in the range [0.0, 1.0] that represents the fraction of rows
+       * assigned to this stream that have been processed by the server. In the
+       * presence of read filters, the server may process more rows than it returns,
+       * so this value reflects progress through the pre-filtering rows.
+       *
+       * This value is only populated for sessions created through the BALANCED
+       * sharding strategy.
+       * 
+ * + * float fraction_consumed = 2; + * + * @param value The fractionConsumed to set. + * @return This builder for chaining. + */ + public Builder setFractionConsumed(float value) { + + fractionConsumed_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * A value in the range [0.0, 1.0] that represents the fraction of rows
+       * assigned to this stream that have been processed by the server. In the
+       * presence of read filters, the server may process more rows than it returns,
+       * so this value reflects progress through the pre-filtering rows.
+       *
+       * This value is only populated for sessions created through the BALANCED
+       * sharding strategy.
+       * 
+ * + * float fraction_consumed = 2; + * + * @return This builder for chaining. + */ + public Builder clearFractionConsumed() { + bitField0_ = (bitField0_ & ~0x00000002); + fractionConsumed_ = 0F; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta1.Storage.Progress progress_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress, + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + * + * @return The progress. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + public Builder setProgress(com.google.cloud.bigquery.storage.v1beta1.Storage.Progress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + public Builder setProgress( + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + public Builder mergeProgress( + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && progress_ != null + && progress_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.Progress + .getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000004); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder + getProgressBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder + getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
+       * Represents the progress of the current stream.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Progress progress = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress, + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder> + getProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress, + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private boolean isSplittable_; + + /** + * + * + *
+       * Whether this stream can be split. For sessions that use the LIQUID sharding
+       * strategy, this value is always false. For BALANCED sessions, this value is
+       * false when enough data have been read such that no more splits are possible
+       * at that point or beyond. For small tables or streams that are the result of
+       * a chain of splits, this value may never be true.
+       * 
+ * + * bool is_splittable = 3; + * + * @return The isSplittable. + */ + @java.lang.Override + public boolean getIsSplittable() { + return isSplittable_; + } + + /** + * + * + *
+       * Whether this stream can be split. For sessions that use the LIQUID sharding
+       * strategy, this value is always false. For BALANCED sessions, this value is
+       * false when enough data have been read such that no more splits are possible
+       * at that point or beyond. For small tables or streams that are the result of
+       * a chain of splits, this value may never be true.
+       * 
+ * + * bool is_splittable = 3; + * + * @param value The isSplittable to set. + * @return This builder for chaining. + */ + public Builder setIsSplittable(boolean value) { + + isSplittable_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Whether this stream can be split. For sessions that use the LIQUID sharding
+       * strategy, this value is always false. For BALANCED sessions, this value is
+       * false when enough data have been read such that no more splits are possible
+       * at that point or beyond. For small tables or streams that are the result of
+       * a chain of splits, this value may never be true.
+       * 
+ * + * bool is_splittable = 3; + * + * @return This builder for chaining. + */ + public Builder clearIsSplittable() { + bitField0_ = (bitField0_ & ~0x00000008); + isSplittable_ = false; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.StreamStatus) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.StreamStatus) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ProgressOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.Progress) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by the
+     * server so far, not including the rows in the current response message.
+     *
+     * This value, along with `at_response_end`, can be used to interpolate the
+     * progress made as the rows in the message are being processed using the
+     * following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     *
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response may not necessarily be equal to the `at_response_start`
+     * value of the current response.
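+     *
+     * As a worked sketch in Java (variable names are illustrative):
+     *
+     *   float progress =
+     *       atResponseStart
+     *           + (atResponseEnd - atResponseStart) * rowsProcessed / rowsInResponse;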
+     * 
+ * + * float at_response_start = 1; + * + * @return The atResponseStart. + */ + float getAtResponseStart(); + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the rows in
+     * the current response.
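+     *
+     * Because it includes the current response's rows, this value is greater
+     * than or equal to `at_response_start` for the same response message.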
+     * 
+ * + * float at_response_end = 2; + * + * @return The atResponseEnd. + */ + float getAtResponseEnd(); + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta1.Progress} */ + public static final class Progress extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.Progress) + ProgressOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Progress.newBuilder() to construct. + private Progress(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Progress() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Progress(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder.class); + } + + public static final int AT_RESPONSE_START_FIELD_NUMBER = 1; + private float atResponseStart_ = 0F; + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by the
+     * server so far, not including the rows in the current response message.
+     *
+     * This value, along with `at_response_end`, can be used to interpolate the
+     * progress made as the rows in the message are being processed using the
+     * following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     *
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response may not necessarily be equal to the `at_response_start`
+     * value of the current response.
+     * 
+ * + * float at_response_start = 1; + * + * @return The atResponseStart. + */ + @java.lang.Override + public float getAtResponseStart() { + return atResponseStart_; + } + + public static final int AT_RESPONSE_END_FIELD_NUMBER = 2; + private float atResponseEnd_ = 0F; + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the rows in
+     * the current response.
+     * 
+ * + * float at_response_end = 2; + * + * @return The atResponseEnd. + */ + @java.lang.Override + public float getAtResponseEnd() { + return atResponseEnd_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (java.lang.Float.floatToRawIntBits(atResponseStart_) != 0) { + output.writeFloat(1, atResponseStart_); + } + if (java.lang.Float.floatToRawIntBits(atResponseEnd_) != 0) { + output.writeFloat(2, atResponseEnd_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (java.lang.Float.floatToRawIntBits(atResponseStart_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeFloatSize(1, atResponseStart_); + } + if (java.lang.Float.floatToRawIntBits(atResponseEnd_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeFloatSize(2, atResponseEnd_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.Progress)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.Progress) obj; + + if (java.lang.Float.floatToIntBits(getAtResponseStart()) + != java.lang.Float.floatToIntBits(other.getAtResponseStart())) return false; + if (java.lang.Float.floatToIntBits(getAtResponseEnd()) + != java.lang.Float.floatToIntBits(other.getAtResponseEnd())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + AT_RESPONSE_START_FIELD_NUMBER; + hash = (53 * hash) + java.lang.Float.floatToIntBits(getAtResponseStart()); + hash = (37 * hash) + AT_RESPONSE_END_FIELD_NUMBER; + hash = (53 * hash) + java.lang.Float.floatToIntBits(getAtResponseEnd()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta1.Progress} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.Progress) + com.google.cloud.bigquery.storage.v1beta1.Storage.ProgressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + atResponseStart_ = 0F; + atResponseEnd_ = 0F; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_Progress_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.Progress(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.Progress result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.atResponseStart_ = atResponseStart_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.atResponseEnd_ = atResponseEnd_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.Progress) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta1.Storage.Progress) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta1.Storage.Progress other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.Progress.getDefaultInstance()) + return this; + if (other.getAtResponseStart() != 0F) { + setAtResponseStart(other.getAtResponseStart()); + } + if (other.getAtResponseEnd() != 0F) { + setAtResponseEnd(other.getAtResponseEnd()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 13: + { + atResponseStart_ = input.readFloat(); + bitField0_ |= 0x00000001; + break; + } // case 13 + case 21: + { + atResponseEnd_ = input.readFloat(); + bitField0_ |= 0x00000002; + break; + } // case 21 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private float atResponseStart_; + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by the
+       * server so far, not including the rows in the current response message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate the
+       * progress made as the rows in the message are being processed using the
+       * following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the `at_response_start`
+       * value of the current response.
+       * 
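+       * As an illustrative sketch (editorial example, not generated code),
+       * given a {@code Progress progress} plus caller-maintained counters
+       * {@code rowsProcessed} and {@code rowsInResponse}, the formula above
+       * becomes:
+       *
+       * <pre>{@code
+       * float start = progress.getAtResponseStart();
+       * float end = progress.getAtResponseEnd();
+       * // Guard against an empty response before interpolating.
+       * float fraction = rowsInResponse == 0
+       *     ? start
+       *     : start + (end - start) * ((float) rowsProcessed / rowsInResponse);
+       * }</pre>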
+ * + * float at_response_start = 1; + * + * @return The atResponseStart. + */ + @java.lang.Override + public float getAtResponseStart() { + return atResponseStart_; + } + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by the
+       * server so far, not including the rows in the current response message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate the
+       * progress made as the rows in the message are being processed using the
+       * following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the `at_response_start`
+       * value of the current response.
+       * 
+ * + * float at_response_start = 1; + * + * @param value The atResponseStart to set. + * @return This builder for chaining. + */ + public Builder setAtResponseStart(float value) { + + atResponseStart_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by the
+       * server so far, not including the rows in the current response message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate the
+       * progress made as the rows in the message are being processed using the
+       * following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the `at_response_start`
+       * value of the current response.
+       * 
+ * + * float at_response_start = 1; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseStart() { + bitField0_ = (bitField0_ & ~0x00000001); + atResponseStart_ = 0F; + onChanged(); + return this; + } + + private float atResponseEnd_; + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the rows in
+       * the current response.
+       * 
+ * + * float at_response_end = 2; + * + * @return The atResponseEnd. + */ + @java.lang.Override + public float getAtResponseEnd() { + return atResponseEnd_; + } + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the rows in
+       * the current response.
+       * 
+ * + * float at_response_end = 2; + * + * @param value The atResponseEnd to set. + * @return This builder for chaining. + */ + public Builder setAtResponseEnd(float value) { + + atResponseEnd_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the rows in
+       * the current response.
+       * 
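+       * A minimal round-trip sketch (editorial example, not generated code),
+       * assuming {@code Storage} is imported, using the builder and the
+       * {@code parseFrom} overloads defined on this message:
+       *
+       * <pre>{@code
+       * Storage.Progress p = Storage.Progress.newBuilder()
+       *     .setAtResponseStart(0.25F)
+       *     .setAtResponseEnd(0.5F)
+       *     .build();
+       * // Serialize and parse back; the two messages compare equal.
+       * Storage.Progress parsed = Storage.Progress.parseFrom(p.toByteArray());
+       * }</pre>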
+ * + * float at_response_end = 2; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseEnd() { + bitField0_ = (bitField0_ & ~0x00000002); + atResponseEnd_ = 0F; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.Progress) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.Progress) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.Progress + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.Progress(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.Progress getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Progress parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Progress getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ThrottleStatusOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.ThrottleStatus) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * How much this connection is being throttled.
+     * 0 is no throttling, 100 is completely throttled.
+     * 
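+     * As an illustrative sketch (editorial example, not generated code),
+     * given a {@code ThrottleStatus throttleStatus}, a reader might surface
+     * heavy throttling; {@code logger} and {@code THROTTLE_WARN_PERCENT} are
+     * hypothetical caller-side names:
+     *
+     * <pre>{@code
+     * int pct = throttleStatus.getThrottlePercent(); // 0..100
+     * if (pct >= THROTTLE_WARN_PERCENT) {
+     *   logger.warning("ReadRows connection throttled at " + pct + "%");
+     * }
+     * }</pre>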
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + int getThrottlePercent(); + } + + /** + * + * + *
+   * Information on whether the current connection is being throttled.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ThrottleStatus} + */ + public static final class ThrottleStatus extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.ThrottleStatus) + ThrottleStatusOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ThrottleStatus.newBuilder() to construct. + private ThrottleStatus(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ThrottleStatus() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ThrottleStatus(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder.class); + } + + public static final int THROTTLE_PERCENT_FIELD_NUMBER = 1; + private int throttlePercent_ = 0; + + /** + * + * + *
+     * How much this connection is being throttled.
+     * 0 is no throttling, 100 is completely throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + @java.lang.Override + public int getThrottlePercent() { + return throttlePercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (throttlePercent_ != 0) { + output.writeInt32(1, throttlePercent_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (throttlePercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, throttlePercent_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus) obj; + + if (getThrottlePercent() != other.getThrottlePercent()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + THROTTLE_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getThrottlePercent(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Information on whether the current connection is being throttled.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ThrottleStatus} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.ThrottleStatus) + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + throttlePercent_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.throttlePercent_ = throttlePercent_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + .getDefaultInstance()) return this; + if (other.getThrottlePercent() != 0) { + setThrottlePercent(other.getThrottlePercent()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + throttlePercent_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int throttlePercent_; + + /** + * + * + *
+       * How much this connection is being throttled.
+       * 0 is no throttling, 100 is completely throttled.
+       * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + @java.lang.Override + public int getThrottlePercent() { + return throttlePercent_; + } + + /** + * + * + *
+       * How much this connection is being throttled.
+       * 0 is no throttling, 100 is completely throttled.
+       * 
+ * + * int32 throttle_percent = 1; + * + * @param value The throttlePercent to set. + * @return This builder for chaining. + */ + public Builder setThrottlePercent(int value) { + + throttlePercent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * How much this connection is being throttled.
+       * 0 is no throttling, 100 is completely throttled.
+       * 
+ * + * int32 throttle_percent = 1; + * + * @return This builder for chaining. + */ + public Builder clearThrottlePercent() { + bitField0_ = (bitField0_ & ~0x00000001); + throttlePercent_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.ThrottleStatus) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.ThrottleStatus) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ThrottleStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ReadRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + boolean hasAvroRows(); + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows getAvroRows(); + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder getAvroRowsOrBuilder(); + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
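+     * As an illustrative sketch (editorial example, not generated code), a
+     * consumer typically dispatches on the {@code rows} oneof of a received
+     * {@code ReadRowsResponse response}; the {@code handleAvro} and
+     * {@code handleArrow} helpers are hypothetical:
+     *
+     * <pre>{@code
+     * switch (response.getRowsCase()) {
+     *   case AVRO_ROWS:
+     *     handleAvro(response.getAvroRows());
+     *     break;
+     *   case ARROW_RECORD_BATCH:
+     *     handleArrow(response.getArrowRecordBatch());
+     *     break;
+     *   case ROWS_NOT_SET:
+     *     break;
+     * }
+     * }</pre>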
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + boolean hasArrowRecordBatch(); + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch getArrowRecordBatch(); + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + */ + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder(); + + /** + * + * + *
+     * Number of serialized rows in the rows block. This value is recorded here,
+     * in addition to the row_count values in the output-specific messages in
+     * `rows`, so that code which needs to record progress through the stream can
+     * do so in an output format-independent way.
+     * 
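+     * For example (editorial sketch, not generated code), a caller-side
+     * counter can track progress without touching the format-specific
+     * payload:
+     *
+     * <pre>{@code
+     * totalRows += response.getRowCount(); // totalRows is a caller-side long
+     * }</pre>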
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + long getRowCount(); + + /** + * + * + *
+     * Estimated stream statistics.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
+     * Estimated stream statistics.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + * + * @return The status. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus getStatus(); + + /** + * + * + *
+     * Estimated stream statistics.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder getStatusOrBuilder(); + + /** + * + * + *
+     * Throttling status. If unset, the latest response still describes
+     * the current throttling status.
+     * 
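+     * As an illustrative sketch (editorial example, not generated code),
+     * carrying the last reported status forward across responses, as the
+     * comment above suggests:
+     *
+     * <pre>{@code
+     * if (response.hasThrottleStatus()) {
+     *   lastThrottleStatus = response.getThrottleStatus(); // caller-side field
+     * }
+     * // When unset, lastThrottleStatus still describes the current state.
+     * }</pre>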
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + * + * @return Whether the throttleStatus field is set. + */ + boolean hasThrottleStatus(); + + /** + * + * + *
+     * Throttling status. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + * + * @return The throttleStatus. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus getThrottleStatus(); + + /** + * + * + *
+     * Throttling status. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder + getThrottleStatusOrBuilder(); + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema getAvroSchema(); + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder + getAvroSchemaOrBuilder(); + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
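+     * As an illustrative sketch (editorial example, not generated code), the
+     * schema oneof is usually read once from the first response of a stream;
+     * {@code getSchema()} on {@code AvroSchema} is assumed here to return the
+     * JSON schema string:
+     *
+     * <pre>{@code
+     * switch (response.getSchemaCase()) {
+     *   case AVRO_SCHEMA: {
+     *     String avroJson = response.getAvroSchema().getSchema();
+     *     break;
+     *   }
+     *   case ARROW_SCHEMA:
+     *     // Serialized Arrow schema bytes; see ArrowProto.ArrowSchema.
+     *     break;
+     *   case SCHEMA_NOT_SET:
+     *     break;
+     * }
+     * }</pre>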
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema getArrowSchema(); + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder(); + + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.RowsCase getRowsCase(); + + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.SchemaCase getSchemaCase(); + } + + /** + * + * + *
+   * A response from calling `ReadRows` may include row data, progress, and
+   * throttling information.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ReadRowsResponse} + */ + public static final class ReadRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) + ReadRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRowsResponse.newBuilder() to construct. + private ReadRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.Builder.class); + } + + private int bitField0_; + private int rowsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rows_; + + public enum RowsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_ROWS(3), + ARROW_RECORD_BATCH(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 3: + return AVRO_ROWS; + case 4: + return ARROW_RECORD_BATCH; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + private int schemaCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(7), + ARROW_SCHEMA(8), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 7: + return AVRO_SCHEMA; + case 8: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int AVRO_ROWS_FIELD_NUMBER = 3; + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + @java.lang.Override + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows getAvroRows() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } + + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder + getAvroRowsOrBuilder() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } + + public static final int ARROW_RECORD_BATCH_FIELD_NUMBER = 4; + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + @java.lang.Override + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + getArrowRecordBatch() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } + + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 6; + private long rowCount_ = 0L; + + /** + * + * + *
+     * Number of serialized rows in the rows block. This value is recorded here,
+     * in addition to the row_count values in the output-specific messages in
+     * `rows`, so that code which needs to record progress through the stream can
+     * do so in an output format-independent way.
+     * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + public static final int STATUS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus status_; + + /** + * + * + *
+     * Estimated stream statistics.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Estimated stream statistics.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + * + * @return The status. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus getStatus() { + return status_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.getDefaultInstance() + : status_; + } + + /** + * + * + *
+     * Estimated stream statistics.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder + getStatusOrBuilder() { + return status_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.getDefaultInstance() + : status_; + } + + public static final int THROTTLE_STATUS_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus throttleStatus_; + + /** + * + * + *
+     * Throttling status. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + * + * @return Whether the throttleStatus field is set. + */ + @java.lang.Override + public boolean hasThrottleStatus() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Throttling status. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + * + * @return The throttleStatus. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus getThrottleStatus() { + return throttleStatus_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.getDefaultInstance() + : throttleStatus_; + } + + /** + * + * + *
+     * Throttling status. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder + getThrottleStatusOrBuilder() { + return throttleStatus_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.getDefaultInstance() + : throttleStatus_; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 7; + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema getAvroSchema() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder + getAvroSchemaOrBuilder() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 8; + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema getArrowSchema() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.getDefaultInstance(); + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStatus()); + } + if (rowsCase_ == 3) { + output.writeMessage( + 3, (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_); + } + if (rowsCase_ == 4) { + output.writeMessage( + 4, (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getThrottleStatus()); + } + if (rowCount_ != 0L) { + output.writeInt64(6, rowCount_); + } + if (schemaCase_ == 7) { + output.writeMessage( + 7, (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + output.writeMessage( + 8, (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStatus()); + } + if (rowsCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getThrottleStatus()); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, rowCount_); + } + if (schemaCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) 
{ + if (!getStatus().equals(other.getStatus())) return false; + } + if (hasThrottleStatus() != other.hasThrottleStatus()) return false; + if (hasThrottleStatus()) { + if (!getThrottleStatus().equals(other.getThrottleStatus())) return false; + } + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 3: + if (!getAvroRows().equals(other.getAvroRows())) return false; + break; + case 4: + if (!getArrowRecordBatch().equals(other.getArrowRecordBatch())) return false; + break; + case 0: + default: + } + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 7: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 8: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (hasThrottleStatus()) { + hash = (37 * hash) + THROTTLE_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getThrottleStatus().hashCode(); + } + switch (rowsCase_) { + case 3: + hash = (37 * hash) + AVRO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getAvroRows().hashCode(); + break; + case 4: + hash = (37 * hash) + ARROW_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getArrowRecordBatch().hashCode(); + break; + case 0: + default: + } + switch (schemaCase_) { + case 7: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 8: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + 
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * A response from calling `ReadRows` may include row data, progress, and
+     * throttling information.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.ReadRowsResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStatusFieldBuilder(); + getThrottleStatusFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (avroRowsBuilder_ != null) { + avroRowsBuilder_.clear(); + } + if (arrowRecordBatchBuilder_ != null) { + arrowRecordBatchBuilder_.clear(); + } + rowCount_ = 0L; + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + throttleStatus_ = null; + if (throttleStatusBuilder_ != null) { + throttleStatusBuilder_.dispose(); + throttleStatusBuilder_ = null; + } + if (avroSchemaBuilder_ != null) { + avroSchemaBuilder_.clear(); + } + if (arrowSchemaBuilder_ != null) { + arrowSchemaBuilder_.clear(); + } + rowsCase_ = 0; + rows_ = null; + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( 
+ com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.rowCount_ = rowCount_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.status_ = statusBuilder_ == null ? status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.throttleStatus_ = + throttleStatusBuilder_ == null ? throttleStatus_ : throttleStatusBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse result) { + result.rowsCase_ = rowsCase_; + result.rows_ = this.rows_; + if (rowsCase_ == 3 && avroRowsBuilder_ != null) { + result.rows_ = avroRowsBuilder_.build(); + } + if (rowsCase_ == 4 && arrowRecordBatchBuilder_ != null) { + result.rows_ = arrowRecordBatchBuilder_.build(); + } + result.schemaCase_ = schemaCase_; + result.schema_ = this.schema_; + if (schemaCase_ == 7 && avroSchemaBuilder_ != null) { + result.schema_ = avroSchemaBuilder_.build(); + } + if (schemaCase_ == 8 && arrowSchemaBuilder_ != null) { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + .getDefaultInstance()) return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (other.hasThrottleStatus()) { + mergeThrottleStatus(other.getThrottleStatus()); + } + switch (other.getRowsCase()) { + case AVRO_ROWS: + { + mergeAvroRows(other.getAvroRows()); + break; + } + case ARROW_RECORD_BATCH: + { + mergeArrowRecordBatch(other.getArrowRecordBatch()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage(getStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 18 + case 26: + { + input.readMessage(getAvroRowsFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + getArrowRecordBatchFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage( + getThrottleStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 48 + case 58: + { + input.readMessage(getAvroSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage(getArrowSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 8; + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder> + avroRowsBuilder_; + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
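+       * <p>Part of the {@code rows} oneof: at most one of {@code avro_rows} and
+       * {@code arrow_record_batch} is set on a given response. Callers should
+       * check {@code hasAvroRows()} or {@code getRowsCase()} before reading.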
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + @java.lang.Override + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows getAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } else { + if (rowsCase_ == 3) { + return avroRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + public Builder setAvroRows( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + avroRowsBuilder_.setMessage(value); + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + public Builder setAvroRows( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder builderForValue) { + if (avroRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + avroRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + public Builder mergeAvroRows( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3 + && rows_ + != com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.newBuilder( + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 3) { + avroRowsBuilder_.mergeFrom(value); + } else { + avroRowsBuilder_.setMessage(value); + } + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + public Builder clearAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + } + avroRowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder + getAvroRowsBuilder() { + return getAvroRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder + getAvroRowsOrBuilder() { + if ((rowsCase_ == 3) && (avroRowsBuilder_ != null)) { + return avroRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } + } + + /** + * + * + *
+       * Serialized row data in Avro format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.AvroRows avro_rows = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder> + getAvroRowsFieldBuilder() { + if (avroRowsBuilder_ == null) { + if (!(rowsCase_ == 3)) { + rows_ = + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.getDefaultInstance(); + } + avroRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRowsOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroRows) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 3; + onChanged(); + return avroRowsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder> + arrowRecordBatchBuilder_; + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
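+       * <p>The Arrow counterpart of {@code avro_rows} in the {@code rows} oneof;
+       * setting one clears the other.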
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + * + * @return Whether the arrowRecordBatch field is set. + */ + @java.lang.Override + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + * + * @return The arrowRecordBatch. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + getArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return arrowRecordBatchBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder + builderForValue) { + if (arrowRecordBatchBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + public Builder mergeArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.newBuilder( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) + rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + arrowRecordBatchBuilder_.mergeFrom(value); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + public Builder clearArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + arrowRecordBatchBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder + getArrowRecordBatchBuilder() { + return getArrowRecordBatchFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if ((rowsCase_ == 4) && (arrowRecordBatchBuilder_ != null)) { + return arrowRecordBatchBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Serialized row data in Arrow RecordBatch format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ArrowRecordBatch arrow_record_batch = 4; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder> + getArrowRecordBatchFieldBuilder() { + if (arrowRecordBatchBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch + .getDefaultInstance(); + } + arrowRecordBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatchOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowRecordBatch) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + return arrowRecordBatchBuilder_; + } + + private long rowCount_; + + /** + * + * + *
+       * Number of serialized rows in the rows block. This value is recorded here,
+       * in addition to the row_count values in the output-specific messages in
+       * `rows`, so that code which needs to record progress through the stream can
+       * do so in an output format-independent way.
+       * 
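+       * <p>For example, a consumer might track progress with
+       * {@code totalRows += response.getRowCount()} (illustrative names),
+       * regardless of whether the payload is Avro or Arrow.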
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+       * Number of serialized rows in the rows block. This value is recorded here,
+       * in addition to the row_count values in the output-specific messages in
+       * `rows`, so that code which needs to record progress through the stream can
+       * do so in an output format-independent way.
+       * 
+ * + * int64 row_count = 6; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Number of serialized rows in the rows block. This value is recorded here,
+       * in addition to the row_count values in the output-specific messages in
+       * `rows`, so that code which needs to record progress through the stream can
+       * do so in an output format-independent way.
+       * 
+ * + * int64 row_count = 6; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000004); + rowCount_ = 0L; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus status_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder> + statusBuilder_; + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + * + * @return The status. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus getStatus() { + if (statusBuilder_ == null) { + return status_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.getDefaultInstance() + : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + public Builder setStatus( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + public Builder setStatus( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + public Builder mergeStatus( + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && status_ != null + && status_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus + .getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000008); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder + getStatusBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder + getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.getDefaultInstance() + : status_; + } + } + + /** + * + * + *
+       * Estimated stream statistics.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.StreamStatus status = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder> + getStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatus.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamStatusOrBuilder>( + getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus throttleStatus_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder> + throttleStatusBuilder_; + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
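+       * <p>In other words, an absent value does not necessarily mean the stream
+       * is unthrottled; the most recently reported status remains in effect.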
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + * + * @return Whether the throttleStatus field is set. + */ + public boolean hasThrottleStatus() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + * + * @return The throttleStatus. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus getThrottleStatus() { + if (throttleStatusBuilder_ == null) { + return throttleStatus_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + .getDefaultInstance() + : throttleStatus_; + } else { + return throttleStatusBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + public Builder setThrottleStatus( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus value) { + if (throttleStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + throttleStatus_ = value; + } else { + throttleStatusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + public Builder setThrottleStatus( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder + builderForValue) { + if (throttleStatusBuilder_ == null) { + throttleStatus_ = builderForValue.build(); + } else { + throttleStatusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + public Builder mergeThrottleStatus( + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus value) { + if (throttleStatusBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && throttleStatus_ != null + && throttleStatus_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + .getDefaultInstance()) { + getThrottleStatusBuilder().mergeFrom(value); + } else { + throttleStatus_ = value; + } + } else { + throttleStatusBuilder_.mergeFrom(value); + } + if (throttleStatus_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + public Builder clearThrottleStatus() { + bitField0_ = (bitField0_ & ~0x00000010); + throttleStatus_ = null; + if (throttleStatusBuilder_ != null) { + throttleStatusBuilder_.dispose(); + throttleStatusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder + getThrottleStatusBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getThrottleStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder + getThrottleStatusOrBuilder() { + if (throttleStatusBuilder_ != null) { + return throttleStatusBuilder_.getMessageOrBuilder(); + } else { + return throttleStatus_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus + .getDefaultInstance() + : throttleStatus_; + } + } + + /** + * + * + *
+       * Throttling status. If unset, the latest response still describes
+       * the current throttling status.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.ThrottleStatus throttle_status = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder> + getThrottleStatusFieldBuilder() { + if (throttleStatusBuilder_ == null) { + throttleStatusBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder>( + getThrottleStatus(), getParentForChildren(), isClean()); + throttleStatus_ = null; + } + return throttleStatusBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder> + avroSchemaBuilder_; + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
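+       * <p>Part of the {@code schema} oneof: the server populates either this
+       * field or {@code arrow_schema}, matching the format of the {@code rows}
+       * payload.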
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance(); + } else { + if (schemaCase_ == 7) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema( + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7 + && schema_ + != com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 7) { + avroSchemaBuilder_.mergeFrom(value); + } else { + avroSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder + getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder + getAvroSchemaOrBuilder() { + if ((schemaCase_ == 7) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Output only. Avro schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 7)) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta1.AvroProto.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 7; + onChanged(); + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
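+       * <p>The Arrow counterpart of {@code avro_schema} in the {@code schema}
+       * oneof.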
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } else { + if (schemaCase_ == 8) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder + builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema( + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8 + && schema_ + != com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 8) { + arrowSchemaBuilder_.mergeFrom(value); + } else { + arrowSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder + getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if ((schemaCase_ == 8) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + } + + /** + * + * + *
+       * Output only. Arrow schema.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 8)) { + schema_ = + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema + .getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta1.ArrowProto.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 8; + onChanged(); + return arrowSchemaBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface BatchCreateReadSessionStreamsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest) + com.google.protobuf.MessageOrBuilder { + + /** 
+ * + * + *
+     * Required. Must be a non-expired session obtained from a call to
+     * CreateReadSession. Only the name field needs to be set.
+     * 
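+     * <p>For example, assuming {@code sessionName} holds the name returned by
+     * CreateReadSession, {@code ReadSession.newBuilder().setName(sessionName).build()}
+     * is sufficient here.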
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the session field is set. + */ + boolean hasSession(); + + /** + * + * + *
+     * Required. Must be a non-expired session obtained from a call to
+     * CreateReadSession. Only the name field needs to be set.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The session. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession getSession(); + + /** + * + * + *
+     * Required. Must be a non-expired session obtained from a call to
+     * CreateReadSession. Only the name field needs to be set.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder getSessionOrBuilder(); + + /** + * + * + *
+     * Required. Number of new streams requested. Must be positive.
+     * Number of added streams may be less than this; see CreateReadSessionRequest
+     * for more information.
+     * 
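+     * <p>The server may add fewer streams than requested; callers should rely on
+     * the number of streams actually returned in the response.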
+ * + * int32 requested_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The requestedStreams. + */ + int getRequestedStreams(); + } + + /** + * + * + *
+   * Information needed to request additional streams for an established read
+   * session.
+   * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest} + */ + public static final class BatchCreateReadSessionStreamsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest) + BatchCreateReadSessionStreamsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCreateReadSessionStreamsRequest.newBuilder() to construct. + private BatchCreateReadSessionStreamsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCreateReadSessionStreamsRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCreateReadSessionStreamsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + .class, + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + .Builder.class); + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session_; + + /** + * + * + *
+     * Required. Must be a non-expired session obtained from a call to
+     * CreateReadSession. Only the name field needs to be set.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the session field is set. + */ + @java.lang.Override + public boolean hasSession() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Must be a non-expired session obtained from a call to
+     * CreateReadSession. Only the name field needs to be set.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The session. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession getSession() { + return session_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.getDefaultInstance() + : session_; + } + + /** + * + * + *
+     * Required. Must be a non-expired session obtained from a call to
+     * CreateReadSession. Only the name field needs to be set.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder + getSessionOrBuilder() { + return session_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.getDefaultInstance() + : session_; + } + + public static final int REQUESTED_STREAMS_FIELD_NUMBER = 2; + private int requestedStreams_ = 0; + + /** + * + * + *
+     * Required. Number of new streams requested. Must be positive.
+     * Number of added streams may be less than this; see CreateReadSessionRequest
+     * for more information.
+     * 
+ * + * int32 requested_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The requestedStreams. + */ + @java.lang.Override + public int getRequestedStreams() { + return requestedStreams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getSession()); + } + if (requestedStreams_ != 0) { + output.writeInt32(2, requestedStreams_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSession()); + } + if (requestedStreams_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, requestedStreams_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest) + obj; + + if (hasSession() != other.hasSession()) return false; + if (hasSession()) { + if (!getSession().equals(other.getSession())) return false; + } + if (getRequestedStreams() != other.getRequestedStreams()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSession()) { + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + } + hash = (37 * hash) + REQUESTED_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getRequestedStreams(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Information needed to request additional streams for an established read
+     * session.
+     * 
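+     *
+     * Editorial usage sketch (not protoc output): building a request for four
+     * additional streams. {@code session} is assumed to be a non-expired
+     * {@code ReadSession} returned by an earlier {@code CreateReadSession} call.
+     * <pre>{@code
+     * Storage.BatchCreateReadSessionStreamsRequest request =
+     *     Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
+     *         .setSession(session)      // only the session name must be set
+     *         .setRequestedStreams(4)   // must be positive
+     *         .build();
+     * }</pre>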
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest) + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSessionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = null; + if (sessionBuilder_ != null) { + sessionBuilder_.dispose(); + sessionBuilder_ = null; + } + requestedStreams_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + result = + new com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) 
{ + result.session_ = sessionBuilder_ == null ? session_ : sessionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestedStreams_ = requestedStreams_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest.getDefaultInstance()) return this; + if (other.hasSession()) { + mergeSession(other.getSession()); + } + if (other.getRequestedStreams() != 0) { + setRequestedStreams(other.getRequestedStreams()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getSessionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + requestedStreams_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession session_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder, + 
com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder> + sessionBuilder_; + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the session field is set. + */ + public boolean hasSession() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The session. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession getSession() { + if (sessionBuilder_ == null) { + return session_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.getDefaultInstance() + : session_; + } else { + return sessionBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
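+       *
+       * Hypothetical sketch (editorial; assumes the generated
+       * {@code ReadSession.Builder} exposes the usual {@code setName} setter
+       * for its {@code name} field):
+       * <pre>{@code
+       * // Per the docs above, only the session name needs to be populated.
+       * requestBuilder.setSession(
+       *     Storage.ReadSession.newBuilder().setName(sessionName));
+       * }</pre>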
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession value) { + if (sessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + } else { + sessionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder builderForValue) { + if (sessionBuilder_ == null) { + session_ = builderForValue.build(); + } else { + sessionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeSession( + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession value) { + if (sessionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && session_ != null + && session_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession + .getDefaultInstance()) { + getSessionBuilder().mergeFrom(value); + } else { + session_ = value; + } + } else { + sessionBuilder_.mergeFrom(value); + } + if (session_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearSession() { + bitField0_ = (bitField0_ & ~0x00000001); + session_ = null; + if (sessionBuilder_ != null) { + sessionBuilder_.dispose(); + sessionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder + getSessionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSessionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder + getSessionOrBuilder() { + if (sessionBuilder_ != null) { + return sessionBuilder_.getMessageOrBuilder(); + } else { + return session_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.getDefaultInstance() + : session_; + } + } + + /** + * + * + *
+       * Required. Must be a non-expired session obtained from a call to
+       * CreateReadSession. Only the name field needs to be set.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.ReadSession session = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder> + getSessionFieldBuilder() { + if (sessionBuilder_ == null) { + sessionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.ReadSessionOrBuilder>( + getSession(), getParentForChildren(), isClean()); + session_ = null; + } + return sessionBuilder_; + } + + private int requestedStreams_; + + /** + * + * + *
+       * Required. Number of new streams requested. Must be positive.
+       * Number of added streams may be less than this; see CreateReadSessionRequest
+       * for more information.
+       * 
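+       *
+       * Editorial sketch of the set/get/clear cycle on this builder field:
+       * <pre>{@code
+       * builder.setRequestedStreams(2);        // ask for two more streams
+       * int n = builder.getRequestedStreams(); // n == 2
+       * builder.clearRequestedStreams();       // back to the proto3 default, 0
+       * }</pre>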
+ * + * int32 requested_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The requestedStreams. + */ + @java.lang.Override + public int getRequestedStreams() { + return requestedStreams_; + } + + /** + * + * + *
+       * Required. Number of new streams requested. Must be positive.
+       * Number of added streams may be less than this; see CreateReadSessionRequest
+       * for more information.
+       * 
+ * + * int32 requested_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The requestedStreams to set. + * @return This builder for chaining. + */ + public Builder setRequestedStreams(int value) { + + requestedStreams_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Number of new streams requested. Must be positive.
+       * Number of added streams may be less than this; see CreateReadSessionRequest
+       * for more information.
+       * 
+ * + * int32 requested_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearRequestedStreams() { + bitField0_ = (bitField0_ & ~0x00000002); + requestedStreams_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateReadSessionStreamsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface BatchCreateReadSessionStreamsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + java.util.List getStreamsList(); + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStreams(int index); + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + int getStreamsCount(); + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + java.util.List + getStreamsOrBuilderList(); + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamsOrBuilder( + int index); + } + + /** + * + * + *
+   * The response from `BatchCreateReadSessionStreams` contains the stream
+   * identifiers for the newly created streams.
+   * 
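+   *
+   * Editorial sketch of inspecting a response. {@code response} is assumed to
+   * come from a {@code BatchCreateReadSessionStreams} call;
+   * {@code handleNewStream} is a hypothetical caller-supplied handler.
+   * <pre>{@code
+   * // The server may add fewer streams than requested, possibly none.
+   * if (response.getStreamsCount() == 0) {
+   *   // no capacity for additional streams right now
+   * }
+   * for (Storage.Stream stream : response.getStreamsList()) {
+   *   handleNewStream(stream);
+   * }
+   * }</pre>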
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse} + */ + public static final class BatchCreateReadSessionStreamsResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse) + BatchCreateReadSessionStreamsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCreateReadSessionStreamsResponse.newBuilder() to construct. + private BatchCreateReadSessionStreamsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCreateReadSessionStreamsResponse() { + streams_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCreateReadSessionStreamsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.class, + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.Builder.class); + } + + public static final int STREAMS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List streams_; + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + @java.lang.Override + public java.util.List + getStreamsList() { + return streams_; + } + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamsOrBuilderList() { + return streams_; + } + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + @java.lang.Override + public int getStreamsCount() { + return streams_.size(); + } + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStreams(int index) { + return streams_.get(index); + } + + /** + * + * + *
+     * Newly added streams.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamsOrBuilder( + int index) { + return streams_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(1, streams_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, streams_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + other = + (com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse) + obj; + + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * The response from `BatchCreateReadSessionStreams` contains the stream
+     * identifiers for the newly created streams.
+     * 
+ * + * Protobuf type {@code + * google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse) + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.class, + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + } else { + streams_ = null; + streamsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + result = + new com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + result) { + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.streams_ = streams_; + } else { 
+ result.streams_ = streamsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse.getDefaultInstance()) return this; + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.parser(), + extensionRegistry); + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(m); + } else { + streamsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + streams_ = + new java.util.ArrayList( + streams_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + streamsBuilder_; + + /** + * + * + *
+       * Newly added streams.
+       * 
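+       *
+       * Editorial sketch: responses are normally server-built, but the builder
+       * can assemble one directly, e.g. as a test fixture ({@code streamA} and
+       * {@code moreStreams} are hypothetical):
+       * <pre>{@code
+       * Storage.BatchCreateReadSessionStreamsResponse fake =
+       *     Storage.BatchCreateReadSessionStreamsResponse.newBuilder()
+       *         .addStreams(streamA)
+       *         .addAllStreams(moreStreams) // any Iterable of Stream messages
+       *         .build();
+       * }</pre>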
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public java.util.List + getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder setStreams( + int index, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder addStreams( + int index, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder addAllStreams( + java.lang.Iterable + values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder getStreamsBuilder( + int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamsOrBuilder( + int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance()); + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder addStreamsBuilder( + int index) { + return getStreamsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance()); + } + + /** + * + * + *
+       * Newly added streams.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta1.Stream streams = 1; + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + streams_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage + .BatchCreateReadSessionStreamsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateReadSessionStreamsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.BatchCreateReadSessionStreamsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface FinalizeStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Stream to finalize.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the stream field is set. + */ + boolean hasStream(); + + /** + * + * + *
+     * Required. Stream to finalize.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The stream. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStream(); + + /** + * + * + *
+     * Required. Stream to finalize.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamOrBuilder(); + } + + /** + * + * + *
+   * Request information for invoking `FinalizeStream`.
+   * 
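+   *
+   * Editorial sketch ({@code stream} is assumed to be a {@code Stream}
+   * previously returned by the read-session APIs):
+   * <pre>{@code
+   * Storage.FinalizeStreamRequest request =
+   *     Storage.FinalizeStreamRequest.newBuilder()
+   *         .setStream(stream) // note: this field uses tag number 2
+   *         .build();
+   * }</pre>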
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest} + */ + public static final class FinalizeStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest) + FinalizeStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FinalizeStreamRequest.newBuilder() to construct. + private FinalizeStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeStreamRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest.Builder + .class); + } + + private int bitField0_; + public static final int STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream stream_; + + /** + * + * + *
+     * Required. Stream to finalize.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the stream field is set. + */ + @java.lang.Override + public boolean hasStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Stream to finalize.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The stream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStream() { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } + + /** + * + * + *
+     * Required. Stream to finalize.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getStreamOrBuilder() { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStream()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStream()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest) obj; + + if (hasStream() != other.hasStream()) return false; + if (hasStream()) { + if (!getStream().equals(other.getStream())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasStream()) { + hash = (37 * hash) + STREAM_FIELD_NUMBER; + hash = (53 * hash) + getStream().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Request information for invoking `FinalizeStream`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest) + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + stream_ = null; + if (streamBuilder_ != null) { + streamBuilder_.dispose(); + streamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.stream_ = streamBuilder_ == null ? 
stream_ : streamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + .getDefaultInstance()) return this; + if (other.hasStream()) { + mergeStream(other.getStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage(getStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream stream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + streamBuilder_; + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the stream field is set. + */ + public boolean hasStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The stream. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getStream() { + if (streamBuilder_ == null) { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } else { + return streamBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setStream(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stream_ = value; + } else { + streamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (streamBuilder_ == null) { + stream_ = builderForValue.build(); + } else { + streamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeStream(com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (streamBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && stream_ != null + && stream_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.Stream + .getDefaultInstance()) { + getStreamBuilder().mergeFrom(value); + } else { + stream_ = value; + } + } else { + streamBuilder_.mergeFrom(value); + } + if (stream_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearStream() { + bitField0_ = (bitField0_ & ~0x00000001); + stream_ = null; + if (streamBuilder_ != null) { + streamBuilder_.dispose(); + streamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder getStreamBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getStreamOrBuilder() { + if (streamBuilder_ != null) { + return streamBuilder_.getMessageOrBuilder(); + } else { + return stream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : stream_; + } + } + + /** + * + * + *
+       * Required. Stream to finalize.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getStreamFieldBuilder() { + if (streamBuilder_ == null) { + streamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + getStream(), getParentForChildren(), isClean()); + stream_ = null; + } + return streamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.FinalizeStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface SplitReadStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Stream to split.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the originalStream field is set. + */ + boolean hasOriginalStream(); + + /** + * + * + *
+     * Required. Stream to split.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The originalStream. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getOriginalStream(); + + /** + * + * + *
+     * Required. Stream to split.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getOriginalStreamOrBuilder(); + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * float fraction = 2; + * + * @return The fraction. + */ + float getFraction(); + } + + /** + * + * + *
+   * Request information for `SplitReadStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest} + */ + public static final class SplitReadStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest) + SplitReadStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SplitReadStreamRequest.newBuilder() to construct. + private SplitReadStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest.Builder + .class); + } + + private int bitField0_; + public static final int ORIGINAL_STREAM_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream originalStream_; + + /** + * + * + *
+     * Required. Stream to split.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the originalStream field is set. + */ + @java.lang.Override + public boolean hasOriginalStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Required. Stream to split.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The originalStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getOriginalStream() { + return originalStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : originalStream_; + } + + /** + * + * + *
+     * Required. Stream to split.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getOriginalStreamOrBuilder() { + return originalStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : originalStream_; + } + + public static final int FRACTION_FIELD_NUMBER = 2; + private float fraction_ = 0F; + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * float fraction = 2; + * + * @return The fraction. + */ + @java.lang.Override + public float getFraction() { + return fraction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getOriginalStream()); + } + if (java.lang.Float.floatToRawIntBits(fraction_) != 0) { + output.writeFloat(2, fraction_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getOriginalStream()); + } + if (java.lang.Float.floatToRawIntBits(fraction_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeFloatSize(2, fraction_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest) obj; + + if (hasOriginalStream() != other.hasOriginalStream()) return false; + if (hasOriginalStream()) { + if (!getOriginalStream().equals(other.getOriginalStream())) return false; + } + if (java.lang.Float.floatToIntBits(getFraction()) + != java.lang.Float.floatToIntBits(other.getFraction())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOriginalStream()) { + hash = (37 * hash) + ORIGINAL_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getOriginalStream().hashCode(); + } + hash = (37 * hash) + FRACTION_FIELD_NUMBER; + hash = (53 * hash) + java.lang.Float.floatToIntBits(getFraction()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Request information for `SplitReadStream`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest) + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOriginalStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + originalStream_ = null; + if (originalStreamBuilder_ != null) { + originalStreamBuilder_.dispose(); + originalStreamBuilder_ = null; + } + fraction_ = 0F; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.originalStream_ = + originalStreamBuilder_ == null ? 
originalStream_ : originalStreamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fraction_ = fraction_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + .getDefaultInstance()) return this; + if (other.hasOriginalStream()) { + mergeOriginalStream(other.getOriginalStream()); + } + if (other.getFraction() != 0F) { + setFraction(other.getFraction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getOriginalStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 21: + { + fraction_ = input.readFloat(); + bitField0_ |= 0x00000002; + break; + } // case 21 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream originalStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + originalStreamBuilder_; + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the originalStream field is set. + */ + public boolean hasOriginalStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The originalStream. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getOriginalStream() { + if (originalStreamBuilder_ == null) { + return originalStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : originalStream_; + } else { + return originalStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setOriginalStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (originalStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + originalStream_ = value; + } else { + originalStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setOriginalStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (originalStreamBuilder_ == null) { + originalStream_ = builderForValue.build(); + } else { + originalStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeOriginalStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (originalStreamBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && originalStream_ != null + && originalStream_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.Stream + .getDefaultInstance()) { + getOriginalStreamBuilder().mergeFrom(value); + } else { + originalStream_ = value; + } + } else { + originalStreamBuilder_.mergeFrom(value); + } + if (originalStream_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearOriginalStream() { + bitField0_ = (bitField0_ & ~0x00000001); + originalStream_ = null; + if (originalStreamBuilder_ != null) { + originalStreamBuilder_.dispose(); + originalStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder + getOriginalStreamBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getOriginalStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getOriginalStreamOrBuilder() { + if (originalStreamBuilder_ != null) { + return originalStreamBuilder_.getMessageOrBuilder(); + } else { + return originalStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : originalStream_; + } + } + + /** + * + * + *
+       * Required. Stream to split.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta1.Stream original_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getOriginalStreamFieldBuilder() { + if (originalStreamBuilder_ == null) { + originalStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + getOriginalStream(), getParentForChildren(), isClean()); + originalStream_ = null; + } + return originalStreamBuilder_; + } + + private float fraction_; + + /** + * + * + *
+       * A value in the range (0.0, 1.0) that specifies the fractional point at
+       * which the original stream should be split. The actual split point is
+       * evaluated on pre-filtered rows, so if a filter is provided, then there is
+       * no guarantee that the division of the rows between the new child streams
+       * will be proportional to this fractional value. Additionally, because the
+       * server-side unit for assigning data is collections of rows, this fraction
+       * will always map to a data storage boundary on the server side.
+       * 
+ * + * float fraction = 2; + * + * @return The fraction. + */ + @java.lang.Override + public float getFraction() { + return fraction_; + } + + /** + * + * + *
+       * A value in the range (0.0, 1.0) that specifies the fractional point at
+       * which the original stream should be split. The actual split point is
+       * evaluated on pre-filtered rows, so if a filter is provided, then there is
+       * no guarantee that the division of the rows between the new child streams
+       * will be proportional to this fractional value. Additionally, because the
+       * server-side unit for assigning data is collections of rows, this fraction
+       * will always map to a data storage boundary on the server side.
+       * 
+ * + * float fraction = 2; + * + * @param value The fraction to set. + * @return This builder for chaining. + */ + public Builder setFraction(float value) { + + fraction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * A value in the range (0.0, 1.0) that specifies the fractional point at
+       * which the original stream should be split. The actual split point is
+       * evaluated on pre-filtered rows, so if a filter is provided, then there is
+       * no guarantee that the division of the rows between the new child streams
+       * will be proportional to this fractional value. Additionally, because the
+       * server-side unit for assigning data is collections of rows, this fraction
+       * will always map to a data storage boundary on the server side.
+       * 
+ * + * float fraction = 2; + * + * @return This builder for chaining. + */ + public Builder clearFraction() { + bitField0_ = (bitField0_ & ~0x00000002); + fraction_ = 0F; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface SplitReadStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + boolean hasPrimaryStream(); + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + * + * @return The primaryStream. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getPrimaryStream(); + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getPrimaryStreamOrBuilder(); + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + boolean hasRemainderStream(); + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + * + * @return The remainderStream. + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getRemainderStream(); + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder getRemainderStreamOrBuilder(); + } + + /** + * + * + *
+   * Response from `SplitReadStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse} + */ + public static final class SplitReadStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse) + SplitReadStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SplitReadStreamResponse.newBuilder() to construct. + private SplitReadStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.Builder + .class); + } + + private int bitField0_; + public static final int PRIMARY_STREAM_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream primaryStream_; + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + @java.lang.Override + public boolean hasPrimaryStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + * + * @return The primaryStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getPrimaryStream() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : primaryStream_; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getPrimaryStreamOrBuilder() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : primaryStream_; + } + + public static final int REMAINDER_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream remainderStream_; + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + @java.lang.Override + public boolean hasRemainderStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + * + * @return The remainderStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getRemainderStream() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : remainderStream_; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getRemainderStreamOrBuilder() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : remainderStream_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getPrimaryStream()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getRemainderStream()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPrimaryStream()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRemainderStream()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse other = + (com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse) obj; + + if (hasPrimaryStream() != other.hasPrimaryStream()) return false; + if (hasPrimaryStream()) { + if (!getPrimaryStream().equals(other.getPrimaryStream())) return false; + } + if (hasRemainderStream() != other.hasRemainderStream()) return false; + if (hasRemainderStream()) { + if (!getRemainderStream().equals(other.getRemainderStream())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPrimaryStream()) { + hash = (37 * hash) + PRIMARY_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getPrimaryStream().hashCode(); + } + if (hasRemainderStream()) { + hash = (37 * hash) + REMAINDER_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getRemainderStream().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse 
+ parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Response from `SplitReadStream`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse) + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getPrimaryStreamFieldBuilder(); + getRemainderStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + primaryStream_ = null; + if (primaryStreamBuilder_ != null) { + primaryStreamBuilder_.dispose(); + primaryStreamBuilder_ = null; + } + remainderStream_ = null; + if (remainderStreamBuilder_ != null) { + remainderStreamBuilder_.dispose(); + remainderStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage + .internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse build() { + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse result = + new com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.primaryStream_ = + primaryStreamBuilder_ == 
null ? primaryStream_ : primaryStreamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.remainderStream_ = + remainderStreamBuilder_ == null ? remainderStream_ : remainderStreamBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + .getDefaultInstance()) return this; + if (other.hasPrimaryStream()) { + mergePrimaryStream(other.getPrimaryStream()); + } + if (other.hasRemainderStream()) { + mergeRemainderStream(other.getRemainderStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getPrimaryStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + getRemainderStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream primaryStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + 
com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + primaryStreamBuilder_; + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
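// [Editorial sketch, not part of the generated file] Per the field docs above,
// primary_stream (and remainder_stream) are left unset when the original
// stream can no longer be split, so the generated hazzers detect a failed
// split. Only methods from this file are used; the helper name is made up.
static boolean splitSucceeded(
    com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse response) {
  return response.hasPrimaryStream() && response.hasRemainderStream();
}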
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + public boolean hasPrimaryStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + * + * @return The primaryStream. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getPrimaryStream() { + if (primaryStreamBuilder_ == null) { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : primaryStream_; + } else { + return primaryStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + public Builder setPrimaryStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (primaryStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + primaryStream_ = value; + } else { + primaryStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + public Builder setPrimaryStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (primaryStreamBuilder_ == null) { + primaryStream_ = builderForValue.build(); + } else { + primaryStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + public Builder mergePrimaryStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (primaryStreamBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && primaryStream_ != null + && primaryStream_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.Stream + .getDefaultInstance()) { + getPrimaryStreamBuilder().mergeFrom(value); + } else { + primaryStream_ = value; + } + } else { + primaryStreamBuilder_.mergeFrom(value); + } + if (primaryStream_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + public Builder clearPrimaryStream() { + bitField0_ = (bitField0_ & ~0x00000001); + primaryStream_ = null; + if (primaryStreamBuilder_ != null) { + primaryStreamBuilder_.dispose(); + primaryStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder + getPrimaryStreamBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getPrimaryStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getPrimaryStreamOrBuilder() { + if (primaryStreamBuilder_ != null) { + return primaryStreamBuilder_.getMessageOrBuilder(); + } else { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : primaryStream_; + } + } + + /** + * + * + *
+       * Primary stream, which contains the beginning portion of
+       * |original_stream|. An empty value indicates that the original stream can no
+       * longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream primary_stream = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getPrimaryStreamFieldBuilder() { + if (primaryStreamBuilder_ == null) { + primaryStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + getPrimaryStream(), getParentForChildren(), isClean()); + primaryStream_ = null; + } + return primaryStreamBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta1.Storage.Stream remainderStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + remainderStreamBuilder_; + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
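// [Editorial sketch, not part of the generated file] Assembling a response
// through the Builder accessors defined in this class. The Stream.Builder
// overloads of the setters build the nested message for you; the stream names
// are placeholders, and Stream.Builder.setName is assumed from the standard
// string-field codegen rather than shown in this excerpt.
static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse
    exampleSplitResponse() {
  return com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.newBuilder()
      .setPrimaryStream(
          com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.newBuilder()
              .setName("projects/p/locations/us/streams/primary"))
      .setRemainderStream(
          com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.newBuilder()
              .setName("projects/p/locations/us/streams/remainder"))
      .build();
}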
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + public boolean hasRemainderStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + * + * @return The remainderStream. + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream getRemainderStream() { + if (remainderStreamBuilder_ == null) { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : remainderStream_; + } else { + return remainderStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + public Builder setRemainderStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (remainderStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + remainderStream_ = value; + } else { + remainderStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + public Builder setRemainderStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder builderForValue) { + if (remainderStreamBuilder_ == null) { + remainderStream_ = builderForValue.build(); + } else { + remainderStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
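// [Editorial note, not part of the generated file] The merge accessor defined
// below performs a protobuf merge rather than a replace: when remainder_stream
// is already set to a non-default value, the set fields of the incoming
// message are folded into the existing one. Use setRemainderStream to replace
// it wholesale. The helper name is made up for illustration.
static void foldInUpdate(
    com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.Builder builder,
    com.google.cloud.bigquery.storage.v1beta1.Storage.Stream update) {
  builder.mergeRemainderStream(update); // merge, not overwrite
}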
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + public Builder mergeRemainderStream( + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream value) { + if (remainderStreamBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && remainderStream_ != null + && remainderStream_ + != com.google.cloud.bigquery.storage.v1beta1.Storage.Stream + .getDefaultInstance()) { + getRemainderStreamBuilder().mergeFrom(value); + } else { + remainderStream_ = value; + } + } else { + remainderStreamBuilder_.mergeFrom(value); + } + if (remainderStream_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + public Builder clearRemainderStream() { + bitField0_ = (bitField0_ & ~0x00000002); + remainderStream_ = null; + if (remainderStreamBuilder_ != null) { + remainderStreamBuilder_.dispose(); + remainderStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
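// [Editorial sketch, not part of the generated file] getRemainderStreamBuilder()
// below lazily creates a SingleFieldBuilderV3 that keeps parent and child in
// sync, so the nested message can be edited in place instead of being rebuilt.
// Stream.Builder.setName is assumed from the standard string-field codegen.
static void renameRemainder(
    com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse.Builder builder) {
  builder.getRemainderStreamBuilder().setName("projects/p/locations/us/streams/renamed");
}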
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder + getRemainderStreamBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRemainderStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder + getRemainderStreamOrBuilder() { + if (remainderStreamBuilder_ != null) { + return remainderStreamBuilder_.getMessageOrBuilder(); + } else { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.getDefaultInstance() + : remainderStream_; + } + } + + /** + * + * + *
+       * Remainder stream, which contains the tail of |original_stream|. An empty
+       * value indicates that the original stream can no longer be split.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta1.Stream remainder_stream = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder> + getRemainderStreamFieldBuilder() { + if (remainderStreamBuilder_ == null) { + remainderStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream, + com.google.cloud.bigquery.storage.v1beta1.Storage.Stream.Builder, + com.google.cloud.bigquery.storage.v1beta1.Storage.StreamOrBuilder>( + getRemainderStream(), getParentForChildren(), isClean()); + remainderStream_ = null; + } + return remainderStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse) + private static final com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.Storage.SplitReadStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_Stream_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_Stream_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_descriptor; + private static final 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_Progress_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_Progress_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n3google/cloud/bigquery/storage/v1beta1/" + + "storage.proto\022%google.cloud.bigquery.sto" + + "rage.v1beta1\032\034google/api/annotations.pro" + + "to\032\027google/api/client.proto\032\037google/api/" + + "field_behavior.proto\032\031google/api/resourc" + + "e.proto\0321google/cloud/bigquery/storage/v" + + "1beta1/arrow.proto\0320google/cloud/bigquer" + + "y/storage/v1beta1/avro.proto\0328google/clo" + + "ud/bigquery/storage/v1beta1/read_options" + + ".proto\032;google/cloud/bigquery/storage/v1" + + "beta1/table_reference.proto\032\033google/prot" + + "obuf/empty.proto\032\037google/protobuf/timest" + + "amp.proto\"|\n\006Stream\022\014\n\004name\030\001 \001(\t:d\352Aa\n%" + + "bigquerystorage.googleapis.com/Stream\0228p" + + "rojects/{project}/locations/{location}/s" + + "treams/{stream}\"_\n\016StreamPosition\022=\n\006str" + + "eam\030\001 \001(\0132-.google.cloud.bigquery.storag" + + "e.v1beta1.Stream\022\016\n\006offset\030\002 \001(\003\"\215\005\n\013Rea" + + "dSession\022\014\n\004name\030\001 \001(\t\022/\n\013expire_time\030\002 " + + "\001(\0132\032.google.protobuf.Timestamp\022H\n\013avro_" + + "schema\030\005 \001(\01321.google.cloud.bigquery.sto" + + "rage.v1beta1.AvroSchemaH\000\022J\n\014arrow_schem" + + "a\030\006 \001(\01322.google.cloud.bigquery.storage." + + "v1beta1.ArrowSchemaH\000\022>\n\007streams\030\004 \003(\0132-" + + ".google.cloud.bigquery.storage.v1beta1.S" + + "tream\022N\n\017table_reference\030\007 \001(\01325.google." + + "cloud.bigquery.storage.v1beta1.TableRefe" + + "rence\022N\n\017table_modifiers\030\010 \001(\01325.google." 
+ + "cloud.bigquery.storage.v1beta1.TableModi" + + "fiers\022R\n\021sharding_strategy\030\t \001(\01627.googl" + + "e.cloud.bigquery.storage.v1beta1.Shardin" + + "gStrategy:k\352Ah\n*bigquerystorage.googleap" + + "is.com/ReadSession\022:projects/{project}/l" + + "ocations/{location}/sessions/{session}B\010" + + "\n\006schema\"\205\004\n\030CreateReadSessionRequest\022S\n" + + "\017table_reference\030\001 \001(\01325.google.cloud.bi" + + "gquery.storage.v1beta1.TableReferenceB\003\340" + + "A\002\022C\n\006parent\030\006 \001(\tB3\340A\002\372A-\n+cloudresourc" + + "emanager.googleapis.com/Project\022N\n\017table" + + "_modifiers\030\002 \001(\01325.google.cloud.bigquery" + + ".storage.v1beta1.TableModifiers\022\031\n\021reque" + + "sted_streams\030\003 \001(\005\022M\n\014read_options\030\004 \001(\013" + + "27.google.cloud.bigquery.storage.v1beta1" + + ".TableReadOptions\022A\n\006format\030\005 \001(\01621.goog" + + "le.cloud.bigquery.storage.v1beta1.DataFo" + + "rmat\022R\n\021sharding_strategy\030\007 \001(\01627.google" + + ".cloud.bigquery.storage.v1beta1.Sharding" + + "Strategy\"d\n\017ReadRowsRequest\022Q\n\rread_posi" + + "tion\030\001 \001(\01325.google.cloud.bigquery.stora" + + "ge.v1beta1.StreamPositionB\003\340A\002\"\240\001\n\014Strea" + + "mStatus\022\033\n\023estimated_row_count\030\001 \001(\003\022\031\n\021" + + "fraction_consumed\030\002 \001(\002\022A\n\010progress\030\004 \001(" + + "\0132/.google.cloud.bigquery.storage.v1beta" + + "1.Progress\022\025\n\ris_splittable\030\003 \001(\010\">\n\010Pro" + + "gress\022\031\n\021at_response_start\030\001 \001(\002\022\027\n\017at_r" + + "esponse_end\030\002 \001(\002\"*\n\016ThrottleStatus\022\030\n\020t" + + "hrottle_percent\030\001 \001(\005\"\211\004\n\020ReadRowsRespon" + + "se\022D\n\tavro_rows\030\003 \001(\0132/.google.cloud.big" + + "query.storage.v1beta1.AvroRowsH\000\022U\n\022arro" + + "w_record_batch\030\004 \001(\01327.google.cloud.bigq" + + "uery.storage.v1beta1.ArrowRecordBatchH\000\022" + + "\021\n\trow_count\030\006 \001(\003\022C\n\006status\030\002 \001(\01323.goo" + + "gle.cloud.bigquery.storage.v1beta1.Strea" + + "mStatus\022N\n\017throttle_status\030\005 \001(\01325.googl" + + "e.cloud.bigquery.storage.v1beta1.Throttl" + + "eStatus\022M\n\013avro_schema\030\007 \001(\01321.google.cl" + + "oud.bigquery.storage.v1beta1.AvroSchemaB" + + "\003\340A\003H\001\022O\n\014arrow_schema\030\010 \001(\01322.google.cl" + + "oud.bigquery.storage.v1beta1.ArrowSchema" + + "B\003\340A\003H\001B\006\n\004rowsB\010\n\006schema\"\220\001\n$BatchCreat" + + "eReadSessionStreamsRequest\022H\n\007session\030\001 " + + "\001(\01322.google.cloud.bigquery.storage.v1be" + + "ta1.ReadSessionB\003\340A\002\022\036\n\021requested_stream" + + "s\030\002 \001(\005B\003\340A\002\"g\n%BatchCreateReadSessionSt" + + "reamsResponse\022>\n\007streams\030\001 \003(\0132-.google." 
+ + "cloud.bigquery.storage.v1beta1.Stream\"[\n" + + "\025FinalizeStreamRequest\022B\n\006stream\030\002 \001(\0132-" + + ".google.cloud.bigquery.storage.v1beta1.S" + + "treamB\003\340A\002\"w\n\026SplitReadStreamRequest\022K\n\017" + + "original_stream\030\001 \001(\0132-.google.cloud.big" + + "query.storage.v1beta1.StreamB\003\340A\002\022\020\n\010fra" + + "ction\030\002 \001(\002\"\251\001\n\027SplitReadStreamResponse\022" + + "E\n\016primary_stream\030\001 \001(\0132-.google.cloud.b" + + "igquery.storage.v1beta1.Stream\022G\n\020remain" + + "der_stream\030\002 \001(\0132-.google.cloud.bigquery" + + ".storage.v1beta1.Stream*>\n\nDataFormat\022\033\n" + + "\027DATA_FORMAT_UNSPECIFIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005" + + "ARROW\020\003*O\n\020ShardingStrategy\022!\n\035SHARDING_" + + "STRATEGY_UNSPECIFIED\020\000\022\n\n\006LIQUID\020\001\022\014\n\010BA" + + "LANCED\020\0022\267\n\n\017BigQueryStorage\022\263\002\n\021CreateR" + + "eadSession\022?.google.cloud.bigquery.stora" + + "ge.v1beta1.CreateReadSessionRequest\0322.go" + + "ogle.cloud.bigquery.storage.v1beta1.Read" + + "Session\"\250\001\332A(table_reference,parent,requ" + + "ested_streams\202\323\344\223\002w\"0/v1beta1/{table_ref" + + "erence.project_id=projects/*}:\001*Z@\";/v1b" + + "eta1/{table_reference.dataset_id=project" + + "s/*/datasets/*}:\001*\022\320\001\n\010ReadRows\0226.google" + + ".cloud.bigquery.storage.v1beta1.ReadRows" + + "Request\0327.google.cloud.bigquery.storage." + + "v1beta1.ReadRowsResponse\"Q\332A\rread_positi" + + "on\202\323\344\223\002;\0229/v1beta1/{read_position.stream" + + ".name=projects/*/streams/*}0\001\022\220\002\n\035BatchC" + + "reateReadSessionStreams\022K.google.cloud.b" + + "igquery.storage.v1beta1.BatchCreateReadS" + + "essionStreamsRequest\032L.google.cloud.bigq" + + "uery.storage.v1beta1.BatchCreateReadSess" + + "ionStreamsResponse\"T\332A\031session,requested" + + "_streams\202\323\344\223\0022\"-/v1beta1/{session.name=p" + + "rojects/*/sessions/*}:\001*\022\247\001\n\016FinalizeStr" + + "eam\022<.google.cloud.bigquery.storage.v1be" + + "ta1.FinalizeStreamRequest\032\026.google.proto" + + "buf.Empty\"?\332A\006stream\202\323\344\223\0020\"+/v1beta1/{st" + + "ream.name=projects/*/streams/*}:\001*\022\340\001\n\017S" + + "plitReadStream\022=.google.cloud.bigquery.s" + + "torage.v1beta1.SplitReadStreamRequest\032>." 
+ + "google.cloud.bigquery.storage.v1beta1.Sp" + + "litReadStreamResponse\"N\332A\017original_strea" + + "m\202\323\344\223\0026\0224/v1beta1/{original_stream.name=" + + "projects/*/streams/*}\032{\312A\036bigquerystorag" + + "e.googleapis.com\322AWhttps://www.googleapi" + + "s.com/auth/bigquery,https://www.googleap" + + "is.com/auth/cloud-platformBp\n)com.google" + + ".cloud.bigquery.storage.v1beta1ZCcloud.g" + + "oogle.com/go/bigquery/storage/apiv1beta1" + + "/storagepb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta1.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta1_Stream_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta1_Stream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_Stream_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_StreamPosition_descriptor, + new java.lang.String[] { + "Stream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_ReadSession_descriptor, + new java.lang.String[] { + "Name", + "ExpireTime", + "AvroSchema", + "ArrowSchema", + "Streams", + "TableReference", + "TableModifiers", + "ShardingStrategy", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_CreateReadSessionRequest_descriptor, + new java.lang.String[] { + "TableReference", + "Parent", + "TableModifiers", + "RequestedStreams", + "ReadOptions", + "Format", + "ShardingStrategy", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsRequest_descriptor, 
+ new java.lang.String[] { + "ReadPosition", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_StreamStatus_descriptor, + new java.lang.String[] { + "EstimatedRowCount", "FractionConsumed", "Progress", "IsSplittable", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_Progress_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1beta1_Progress_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_Progress_descriptor, + new java.lang.String[] { + "AtResponseStart", "AtResponseEnd", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_ThrottleStatus_descriptor, + new java.lang.String[] { + "ThrottlePercent", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_ReadRowsResponse_descriptor, + new java.lang.String[] { + "AvroRows", + "ArrowRecordBatch", + "RowCount", + "Status", + "ThrottleStatus", + "AvroSchema", + "ArrowSchema", + "Rows", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsRequest_descriptor, + new java.lang.String[] { + "Session", "RequestedStreams", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_BatchCreateReadSessionStreamsResponse_descriptor, + new java.lang.String[] { + "Streams", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_FinalizeStreamRequest_descriptor, + new java.lang.String[] { + "Stream", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + 
internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamRequest_descriptor, + new java.lang.String[] { + "OriginalStream", "Fraction", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_SplitReadStreamResponse_descriptor, + new java.lang.String[] { + "PrimaryStream", "RemainderStream", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta1.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta1.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta1.ReadOptions.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java new file mode 100644 index 000000000000..d5930ba36cef --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java @@ -0,0 +1,1987 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1beta1/table_reference.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta1; + +public final class TableReferenceProto { + private TableReferenceProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface TableReferenceOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.TableReference) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The assigned project ID of the project.
+     * 
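// [Editorial sketch, not part of the generated file] Each string field on this
// interface has two getters: the decoded String and the raw UTF-8 bytes.
static void printProject(
    com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder ref) {
  java.lang.String id = ref.getProjectId();                     // decoded String
  com.google.protobuf.ByteString raw = ref.getProjectIdBytes(); // raw UTF-8 bytes
  System.out.println(id + " (" + raw.size() + " bytes)");
}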
+ * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
+     * The assigned project ID of the project.
+     * 
+ * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
+     * The ID of the dataset in the above project.
+     * 
+ * + * string dataset_id = 2; + * + * @return The datasetId. + */ + java.lang.String getDatasetId(); + + /** + * + * + *
+     * The ID of the dataset in the above project.
+     * 
+ * + * string dataset_id = 2; + * + * @return The bytes for datasetId. + */ + com.google.protobuf.ByteString getDatasetIdBytes(); + + /** + * + * + *
+     * The ID of the table in the above dataset.
+     * 
+ * + * string table_id = 3; + * + * @return The tableId. + */ + java.lang.String getTableId(); + + /** + * + * + *
+     * The ID of the table in the above dataset.
+     * 
+ * + * string table_id = 3; + * + * @return The bytes for tableId. + */ + com.google.protobuf.ByteString getTableIdBytes(); + } + + /** + * + * + *
+   * Table reference that includes just the 3 strings needed to identify a table.
+   * 
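// [Editorial sketch, not part of the generated file] Constructing the
// three-part identifier described above. The IDs are placeholders, and the
// setXxx builder methods are assumed from the standard string-field codegen.
static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference
    exampleTable() {
  return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.newBuilder()
      .setProjectId("my-project")
      .setDatasetId("my_dataset")
      .setTableId("my_table")
      .build();
}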
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.TableReference} + */ + public static final class TableReference extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.TableReference) + TableReferenceOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableReference.newBuilder() to construct. + private TableReference(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableReference() { + projectId_ = ""; + datasetId_ = ""; + tableId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableReference(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.class, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder + .class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
+     * The assigned project ID of the project.
+     * 
+ * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
+     * The assigned project ID of the project.
+     * 
+ * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATASET_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object datasetId_ = ""; + + /** + * + * + *
+     * The ID of the dataset in the above project.
+     * 
+ * + * string dataset_id = 2; + * + * @return The datasetId. + */ + @java.lang.Override + public java.lang.String getDatasetId() { + java.lang.Object ref = datasetId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + datasetId_ = s; + return s; + } + } + + /** + * + * + *
+     * The ID of the dataset in the above project.
+     * 
+ * + * string dataset_id = 2; + * + * @return The bytes for datasetId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatasetIdBytes() { + java.lang.Object ref = datasetId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + datasetId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TABLE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object tableId_ = ""; + + /** + * + * + *
+     * The ID of the table in the above dataset.
+     * 
+ * + * string table_id = 3; + * + * @return The tableId. + */ + @java.lang.Override + public java.lang.String getTableId() { + java.lang.Object ref = tableId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + tableId_ = s; + return s; + } + } + + /** + * + * + *
+     * The ID of the table in the above dataset.
+     * 
+ * + * string table_id = 3; + * + * @return The bytes for tableId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableIdBytes() { + java.lang.Object ref = tableId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + tableId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(datasetId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, datasetId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, tableId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(datasetId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, datasetId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, tableId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference other = + (com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getDatasetId().equals(other.getDatasetId())) return false; + if (!getTableId().equals(other.getTableId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + DATASET_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatasetId().hashCode(); + hash = (37 * hash) + TABLE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTableId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Table reference that includes just the 3 strings needed to identify a table.
+     * 
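// [Editorial sketch, not part of the generated file] Built messages are
// immutable; edits go through toBuilder() (defined above), which copies the
// message state into a fresh Builder. setTableId is assumed from the codegen.
static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference rename(
    com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference table) {
  return table.toBuilder().setTableId("my_table_v2").build();
}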
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.TableReference} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.TableReference) + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReferenceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.class, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + datasetId_ = ""; + tableId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference build() { + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference result = + new com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.datasetId_ = datasetId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.tableId_ = tableId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + .getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDatasetId().isEmpty()) { + datasetId_ = other.datasetId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getTableId().isEmpty()) { + tableId_ = other.tableId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + datasetId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + tableId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
+       * The assigned project ID of the project.
+       * 
+ * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * The assigned project ID of the project.
+       * 
+ * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * The assigned project ID of the project.
+       * 
+ * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The assigned project ID of the project.
+       * 
+ * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+       * The assigned project ID of the project.
+       * 
+ * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object datasetId_ = ""; + + /** + * + * + *
+       * The ID of the dataset in the above project.
+       * 
+ * + * string dataset_id = 2; + * + * @return The datasetId. + */ + public java.lang.String getDatasetId() { + java.lang.Object ref = datasetId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + datasetId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * The ID of the dataset in the above project.
+       * 
+ * + * string dataset_id = 2; + * + * @return The bytes for datasetId. + */ + public com.google.protobuf.ByteString getDatasetIdBytes() { + java.lang.Object ref = datasetId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + datasetId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * The ID of the dataset in the above project.
+       * 
+ * + * string dataset_id = 2; + * + * @param value The datasetId to set. + * @return This builder for chaining. + */ + public Builder setDatasetId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + datasetId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * The ID of the dataset in the above project.
+       * 
+ * + * string dataset_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearDatasetId() { + datasetId_ = getDefaultInstance().getDatasetId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * The ID of the dataset in the above project.
+       * 
+ * + * string dataset_id = 2; + * + * @param value The bytes for datasetId to set. + * @return This builder for chaining. + */ + public Builder setDatasetIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + datasetId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object tableId_ = ""; + + /** + * + * + *
+       * The ID of the table in the above dataset.
+       * 
+ * + * string table_id = 3; + * + * @return The tableId. + */ + public java.lang.String getTableId() { + java.lang.Object ref = tableId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + tableId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * The ID of the table in the above dataset.
+       * 
+ * + * string table_id = 3; + * + * @return The bytes for tableId. + */ + public com.google.protobuf.ByteString getTableIdBytes() { + java.lang.Object ref = tableId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + tableId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * The ID of the table in the above dataset.
+       * 
+ * + * string table_id = 3; + * + * @param value The tableId to set. + * @return This builder for chaining. + */ + public Builder setTableId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + tableId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * The ID of the table in the above dataset.
+       * 
+ * + * string table_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearTableId() { + tableId_ = getDefaultInstance().getTableId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+       * The ID of the table in the above dataset.
+       * 
+ * + * string table_id = 3; + * + * @param value The bytes for tableId to set. + * @return This builder for chaining. + */ + public Builder setTableIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + tableId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.TableReference) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.TableReference) + private static final com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .TableReference + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableReference parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TableModifiersOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta1.TableModifiers) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + boolean hasSnapshotTime(); + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + com.google.protobuf.Timestamp getSnapshotTime(); + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder(); + } + + /** + * + * + *
+   * All fields in this message are optional.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.TableModifiers} + */ + public static final class TableModifiers extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta1.TableModifiers) + TableModifiersOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableModifiers.newBuilder() to construct. + private TableModifiers(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableModifiers() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableModifiers(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.class, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder + .class); + } + + private int bitField0_; + public static final int SNAPSHOT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp snapshotTime_; + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + @java.lang.Override + public boolean hasSnapshotTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getSnapshotTime() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getSnapshotTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSnapshotTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers other = + (com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers) obj; + + if (hasSnapshotTime() != other.hasSnapshotTime()) return false; + if (hasSnapshotTime()) { + if (!getSnapshotTime().equals(other.getSnapshotTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSnapshotTime()) { + hash = (37 * hash) + SNAPSHOT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * All fields in this message are optional.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta1.TableModifiers} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta1.TableModifiers) + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiersOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.class, + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSnapshotTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + snapshotTime_ = null; + if (snapshotTimeBuilder_ != null) { + snapshotTimeBuilder_.dispose(); + snapshotTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers build() { + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + buildPartial() { + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers result = + new com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.snapshotTime_ = + snapshotTimeBuilder_ == null ? 
snapshotTime_ : snapshotTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers other) { + if (other + == com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + .getDefaultInstance()) return this; + if (other.hasSnapshotTime()) { + mergeSnapshotTime(other.getSnapshotTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getSnapshotTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp snapshotTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + snapshotTimeBuilder_; + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + public boolean hasSnapshotTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + public com.google.protobuf.Timestamp getSnapshotTime() { + if (snapshotTimeBuilder_ == null) { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } else { + return snapshotTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshotTime_ = value; + } else { + snapshotTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (snapshotTimeBuilder_ == null) { + snapshotTime_ = builderForValue.build(); + } else { + snapshotTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder mergeSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && snapshotTime_ != null + && snapshotTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getSnapshotTimeBuilder().mergeFrom(value); + } else { + snapshotTime_ = value; + } + } else { + snapshotTimeBuilder_.mergeFrom(value); + } + if (snapshotTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder clearSnapshotTime() { + bitField0_ = (bitField0_ & ~0x00000001); + snapshotTime_ = null; + if (snapshotTimeBuilder_ != null) { + snapshotTimeBuilder_.dispose(); + snapshotTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getSnapshotTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSnapshotTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + if (snapshotTimeBuilder_ != null) { + return snapshotTimeBuilder_.getMessageOrBuilder(); + } else { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getSnapshotTimeFieldBuilder() { + if (snapshotTimeBuilder_ == null) { + snapshotTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSnapshotTime(), getParentForChildren(), isClean()); + snapshotTime_ = null; + } + return snapshotTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta1.TableModifiers) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.TableModifiers) + private static final com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto + .TableModifiers + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers(); + } + + public static com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableModifiers parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor 
getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n;google/cloud/bigquery/storage/v1beta1/" + + "table_reference.proto\022%google.cloud.bigq" + + "uery.storage.v1beta1\032\037google/protobuf/ti" + + "mestamp.proto\"J\n\016TableReference\022\022\n\nproje" + + "ct_id\030\001 \001(\t\022\022\n\ndataset_id\030\002 \001(\t\022\020\n\010table" + + "_id\030\003 \001(\t\"C\n\016TableModifiers\0221\n\rsnapshot_" + + "time\030\001 \001(\0132\032.google.protobuf.TimestampB\205" + + "\001\n)com.google.cloud.bigquery.storage.v1b" + + "eta1B\023TableReferenceProtoZCcloud.google." + + "com/go/bigquery/storage/apiv1beta1/stora" + + "gepb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_TableReference_descriptor, + new java.lang.String[] { + "ProjectId", "DatasetId", "TableId", + }); + internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta1_TableModifiers_descriptor, + new java.lang.String[] { + "SnapshotTime", + }); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/arrow.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/arrow.proto new file mode 100644 index 000000000000..378975cf079e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/arrow.proto @@ -0,0 +1,36 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta1/storagepb;storagepb"; +option java_outer_classname = "ArrowProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Arrow schema. +message ArrowSchema { + // IPC serialized Arrow schema. + bytes serialized_schema = 1; +} + +// Arrow RecordBatch. 
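+//
+// A hedged decoding sketch (comment only, not part of the API; it assumes the
+// Apache Arrow Java libraries, org.apache.arrow:arrow-vector plus an
+// allocator module). The serialized schema and record batch bytes can be
+// concatenated into a single Arrow IPC stream and read back:
+//
+//   InputStream ipc = new SequenceInputStream(
+//       new ByteArrayInputStream(schema.getSerializedSchema().toByteArray()),
+//       new ByteArrayInputStream(batch.getSerializedRecordBatch().toByteArray()));
+//   try (BufferAllocator allocator = new RootAllocator();
+//       ArrowStreamReader reader = new ArrowStreamReader(ipc, allocator)) {
+//     while (reader.loadNextBatch()) {
+//       VectorSchemaRoot root = reader.getVectorSchemaRoot();
+//       // root.getRowCount() should match row_count below.
+//     }
+//   }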
+message ArrowRecordBatch {
+  // IPC serialized Arrow RecordBatch.
+  bytes serialized_record_batch = 1;
+
+  // The count of rows in the returned block.
+  int64 row_count = 2;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/avro.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/avro.proto
new file mode 100644
index 000000000000..ccb76f2db7e3
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/avro.proto
@@ -0,0 +1,37 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta1/storagepb;storagepb";
+option java_outer_classname = "AvroProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// Avro schema.
+message AvroSchema {
+  // JSON-serialized schema, as described at
+  // https://avro.apache.org/docs/1.8.1/spec.html
+  string schema = 1;
+}
+
+// Avro rows.
+message AvroRows {
+  // Binary serialized rows in a block.
+  bytes serialized_binary_rows = 1;
+
+  // The count of rows in the returned block.
+  int64 row_count = 2;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/read_options.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/read_options.proto
new file mode 100644
index 000000000000..0fe7d2b3049d
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/read_options.proto
@@ -0,0 +1,84 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta1/storagepb;storagepb";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// Options dictating how we read a table.
+message TableReadOptions {
+  // Optional. The names of the fields in the table to be returned. If no
+  // field names are specified, then all fields in the table are returned.
+  //
+  // Nested fields -- the child elements of a STRUCT field -- can be selected
+  // individually using their fully-qualified names, and will be returned as
+  // record fields containing only the selected nested fields. If a STRUCT
+  // field is specified in the selected fields list, all of the child elements
+  // will be returned.
+  //
+  // As an example, consider a table with the following schema:
+  //
+  //   {
+  //       "name": "struct_field",
+  //       "type": "RECORD",
+  //       "mode": "NULLABLE",
+  //       "fields": [
+  //           {
+  //               "name": "string_field1",
+  //               "type": "STRING",
+  //               "mode": "NULLABLE"
+  //           },
+  //           {
+  //               "name": "string_field2",
+  //               "type": "STRING",
+  //               "mode": "NULLABLE"
+  //           }
+  //       ]
+  //   }
+  //
+  // Specifying "struct_field" in the selected fields list will result in a
+  // read session schema with the following logical structure:
+  //
+  //   struct_field {
+  //       string_field1
+  //       string_field2
+  //   }
+  //
+  // Specifying "struct_field.string_field1" in the selected fields list will
+  // result in a read session schema with the following logical structure:
+  //
+  //   struct_field {
+  //       string_field1
+  //   }
+  //
+  // The order of the fields in the read session schema is derived from the
+  // table schema and does not correspond to the order in which the fields are
+  // specified in this list.
+  repeated string selected_fields = 1;
+
+  // Optional. SQL text filtering statement, similar to a WHERE clause in
+  // a SQL query. Aggregates are not supported.
+  //
+  // Examples: "int_field > 5"
+  //           "date_field = CAST('2014-9-27' as DATE)"
+  //           "nullable_field is not NULL"
+  //           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+  //           "numeric_field BETWEEN 1.0 AND 5.0"
+  //
+  // Restricted to a maximum length of 1 MB.
+  string row_restriction = 2;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/storage.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/storage.proto
new file mode 100644
index 000000000000..5cd150e38ddd
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/storage.proto
@@ -0,0 +1,429 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
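+
+// A minimal end-to-end read sketch (comment only; it assumes the generated
+// Java client surface in com.google.cloud.bigquery.storage.v1beta1, e.g.
+// BigQueryStorageClient, and uses hypothetical project/dataset/table IDs):
+//
+//   try (BigQueryStorageClient client = BigQueryStorageClient.create()) {
+//     TableReferenceProto.TableReference table =
+//         TableReferenceProto.TableReference.newBuilder()
+//             .setProjectId("my-project")
+//             .setDatasetId("my_dataset")
+//             .setTableId("my_table")
+//             .build();
+//     Storage.ReadSession session = client.createReadSession(
+//         Storage.CreateReadSessionRequest.newBuilder()
+//             .setParent("projects/my-project")
+//             .setTableReference(table)
+//             .setRequestedStreams(1)
+//             .build());
+//     Storage.ReadRowsRequest request = Storage.ReadRowsRequest.newBuilder()
+//         .setReadPosition(Storage.StreamPosition.newBuilder()
+//             .setStream(session.getStreams(0)))
+//         .build();
+//     for (Storage.ReadRowsResponse response :
+//         client.readRowsCallable().call(request)) {
+//       // Decode response.getAvroRows() or response.getArrowRecordBatch().
+//     }
+//   }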
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/storage/v1beta1/arrow.proto";
+import "google/cloud/bigquery/storage/v1beta1/avro.proto";
+import "google/cloud/bigquery/storage/v1beta1/read_options.proto";
+import "google/cloud/bigquery/storage/v1beta1/table_reference.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta1/storagepb;storagepb";
+option java_package = "com.google.cloud.bigquery.storage.v1beta1";
+
+// BigQuery storage API.
+//
+// The BigQuery storage API can be used to read data stored in BigQuery.
+//
+// The v1beta1 API is not yet officially deprecated, and will go through a full
+// deprecation cycle (https://cloud.google.com/products#product-launch-stages)
+// before the service is turned down. However, new code should use the v1 API
+// going forward.
+service BigQueryStorage {
+  option (google.api.default_host) = "bigquerystorage.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates a new read session. A read session divides the contents of a
+  // BigQuery table into one or more streams, which can then be used to read
+  // data from the table. The read session also specifies properties of the
+  // data to be read, such as a list of columns or a push-down filter describing
+  // the rows to be returned.
+  //
+  // A particular row can be read by at most one stream. When the caller has
+  // reached the end of each stream in the session, then all the data in the
+  // table has been read.
+  //
+  // Read sessions automatically expire 6 hours after they are created and do
+  // not require manual clean-up by the caller.
+  rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) {
+    option (google.api.http) = {
+      post: "/v1beta1/{table_reference.project_id=projects/*}"
+      body: "*"
+      additional_bindings {
+        post: "/v1beta1/{table_reference.dataset_id=projects/*/datasets/*}"
+        body: "*"
+      }
+    };
+    option (google.api.method_signature) =
+        "table_reference,parent,requested_streams";
+  }
+
+  // Reads rows from the table in the format prescribed by the read session.
+  // Each response contains one or more table rows, up to a maximum of 10 MiB
+  // per response; read requests which attempt to read individual rows larger
+  // than this will fail.
+  //
+  // Each request also returns a set of stream statistics reflecting the
+  // estimated total number of rows in the read stream. This number is computed
+  // based on the total table size and the number of active streams in the read
+  // session, and may change as other streams continue to read data.
+  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{read_position.stream.name=projects/*/streams/*}"
+    };
+    option (google.api.method_signature) = "read_position";
+  }
+
+  // Creates additional streams for a ReadSession. This API can be used to
+  // dynamically adjust the parallelism of a batch processing task upwards by
+  // adding additional workers.
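+  //
+  // A hedged scale-up sketch (comment only; `client` and `session` are
+  // assumed from the read sketch near the top of this file):
+  //
+  //   Storage.BatchCreateReadSessionStreamsResponse added =
+  //       client.batchCreateReadSessionStreams(
+  //           Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
+  //               .setSession(session)
+  //               .setRequestedStreams(2)
+  //               .build());
+  //   // added.getStreamsList() holds the newly created streams.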
+  rpc BatchCreateReadSessionStreams(BatchCreateReadSessionStreamsRequest)
+      returns (BatchCreateReadSessionStreamsResponse) {
+    option (google.api.http) = {
+      post: "/v1beta1/{session.name=projects/*/sessions/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "session,requested_streams";
+  }
+
+  // Causes a single stream in a ReadSession to gracefully stop. This
+  // API can be used to dynamically adjust the parallelism of a batch processing
+  // task downwards without losing data.
+  //
+  // This API does not delete the stream -- it remains visible in the
+  // ReadSession, and any data processed by the stream is not released to other
+  // streams. However, no additional data will be assigned to the stream once
+  // this call completes. Callers must continue reading data on the stream until
+  // the end of the stream is reached so that data which has already been
+  // assigned to the stream will be processed.
+  //
+  // This method will return an error if there are no other live streams
+  // in the Session, or if SplitReadStream() has been called on the given
+  // Stream.
+  rpc FinalizeStream(FinalizeStreamRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v1beta1/{stream.name=projects/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "stream";
+  }
+
+  // Splits a given read stream into two Streams. These streams are referred to
+  // as the primary and the residual of the split. The original stream can still
+  // be read from in the same manner as before. Both of the returned streams can
+  // also be read from, and the total rows returned by both child streams will be
+  // the same as the rows read from the original stream.
+  //
+  // Moreover, the two child streams will be allocated back to back in the
+  // original Stream. Concretely, it is guaranteed that for streams Original,
+  // Primary, and Residual, Original[0-j] = Primary[0-j] and
+  // Original[j-n] = Residual[0-m] once the streams have been read to
+  // completion.
+  //
+  // This method is guaranteed to be idempotent.
+  rpc SplitReadStream(SplitReadStreamRequest)
+      returns (SplitReadStreamResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{original_stream.name=projects/*/streams/*}"
+    };
+    option (google.api.method_signature) = "original_stream";
+  }
+}
+
+// Information about a single data stream within a read session.
+message Stream {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/Stream"
+    pattern: "projects/{project}/locations/{location}/streams/{stream}"
+  };
+
+  // Name of the stream, in the form
+  // `projects/{project_id}/locations/{location}/streams/{stream_id}`.
+  string name = 1;
+}
+
+// Expresses a point within a given stream using an offset position.
+message StreamPosition {
+  // Identifier for a given Stream.
+  Stream stream = 1;
+
+  // Position in the stream.
+  int64 offset = 2;
+}
+
+// Information returned from a `CreateReadSession` request.
+message ReadSession {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/ReadSession"
+    pattern: "projects/{project}/locations/{location}/sessions/{session}"
+  };
+
+  // Unique identifier for the session, in the form
+  // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+  string name = 1;
+
+  // Time at which the session becomes invalid. After this time, subsequent
+  // requests to read this Session will return errors.
+  google.protobuf.Timestamp expire_time = 2;
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields.
+  oneof schema {
+    // Avro schema.
+    AvroSchema avro_schema = 5;
+
+    // Arrow schema.
+    ArrowSchema arrow_schema = 6;
+  }
+
+  // Streams associated with this session.
+  repeated Stream streams = 4;
+
+  // Table that this ReadSession is reading from.
+  TableReference table_reference = 7;
+
+  // Any modifiers which are applied when reading from the specified table.
+  TableModifiers table_modifiers = 8;
+
+  // The strategy to use for distributing data among the streams.
+  ShardingStrategy sharding_strategy = 9;
+}
+
+// Data format for input or output data.
+enum DataFormat {
+  // Data format is unspecified.
+  DATA_FORMAT_UNSPECIFIED = 0;
+
+  // Avro is a standard open source row-based file format.
+  // See https://avro.apache.org/ for more details.
+  AVRO = 1;
+
+  // Arrow is a standard open source column-based message format.
+  // See https://arrow.apache.org/ for more details.
+  ARROW = 3;
+}
+
+// Strategy for distributing data among multiple streams in a read session.
+enum ShardingStrategy {
+  // Same as LIQUID.
+  SHARDING_STRATEGY_UNSPECIFIED = 0;
+
+  // Assigns data to each stream based on the client's read rate. The faster the
+  // client reads from a stream, the more data is assigned to the stream. In
+  // this strategy, it's possible to read all data from a single stream even if
+  // there are other streams present.
+  LIQUID = 1;
+
+  // Assigns data to each stream such that roughly the same number of rows can
+  // be read from each stream. Because the server-side unit for assigning data
+  // is collections of rows, the API does not guarantee that each stream will
+  // return the same number of rows. Additionally, the limits are enforced based
+  // on the number of pre-filtering rows, so some filters can lead to lopsided
+  // assignments.
+  BALANCED = 2;
+}
+
+// Creates a new read session, which may include additional options such as
+// requested parallelism, projection filters and constraints.
+message CreateReadSessionRequest {
+  // Required. Reference to the table to read.
+  TableReference table_reference = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. String of the form `projects/{project_id}` indicating the
+  // project this ReadSession is associated with. This is the project that will
+  // be billed for usage.
+  string parent = 6 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Any modifiers to the Table (e.g. snapshot timestamp).
+  TableModifiers table_modifiers = 2;
+
+  // Initial number of streams. If unset or 0, the server will provide a
+  // number of streams so as to produce reasonable throughput. Must be
+  // non-negative. The number of streams may be lower than the requested number,
+  // depending on the amount of parallelism that is reasonable for the table and
+  // the maximum amount of parallelism allowed by the system.
+  //
+  // Streams must be read starting from offset 0.
+  int32 requested_streams = 3;
+
+  // Read options for this session (e.g. column selection, filters).
+  TableReadOptions read_options = 4;
+
+  // Data output format. Currently defaults to Avro.
+  // DATA_FORMAT_UNSPECIFIED is not supported.
+  DataFormat format = 5;
+
+  // The strategy to use for distributing data among multiple streams. Currently
+  // defaults to liquid sharding.
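+  //
+  // For example, to request even row distribution across streams (a sketch;
+  // setter names follow standard proto codegen):
+  //
+  //   Storage.CreateReadSessionRequest.newBuilder()
+  //       .setShardingStrategy(Storage.ShardingStrategy.BALANCED)
+  //       .build();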
+ ShardingStrategy sharding_strategy = 7; +} + +// Requesting row data via `ReadRows` must provide Stream position information. +message ReadRowsRequest { + // Required. Identifier of the position in the stream to start reading from. + // The offset requested must be less than the last row read from ReadRows. + // Requesting a larger offset is undefined. + StreamPosition read_position = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Progress information for a given Stream. +message StreamStatus { + // Number of estimated rows in the current stream. May change over time as + // different readers in the stream progress at rates which are relatively fast + // or slow. + int64 estimated_row_count = 1; + + // A value in the range [0.0, 1.0] that represents the fraction of rows + // assigned to this stream that have been processed by the server. In the + // presence of read filters, the server may process more rows than it returns, + // so this value reflects progress through the pre-filtering rows. + // + // This value is only populated for sessions created through the BALANCED + // sharding strategy. + float fraction_consumed = 2; + + // Represents the progress of the current stream. + Progress progress = 4; + + // Whether this stream can be split. For sessions that use the LIQUID sharding + // strategy, this value is always false. For BALANCED sessions, this value is + // false when enough data have been read such that no more splits are possible + // at that point or beyond. For small tables or streams that are the result of + // a chain of splits, this value may never be true. + bool is_splittable = 3; +} + +message Progress { + // The fraction of rows assigned to the stream that have been processed by the + // server so far, not including the rows in the current response message. + // + // This value, along with `at_response_end`, can be used to interpolate the + // progress made as the rows in the message are being processed using the + // following formula: `at_response_start + (at_response_end - + // at_response_start) * rows_processed_from_response / rows_in_response`. + // + // Note that if a filter is provided, the `at_response_end` value of the + // previous response may not necessarily be equal to the `at_response_start` + // value of the current response. + float at_response_start = 1; + + // Similar to `at_response_start`, except that this value includes the rows in + // the current response. + float at_response_end = 2; +} + +// Information on if the current connection is being throttled. +message ThrottleStatus { + // How much this connection is being throttled. + // 0 is no throttling, 100 is completely throttled. + int32 throttle_percent = 1; +} + +// Response from calling `ReadRows` may include row data, progress and +// throttling information. +message ReadRowsResponse { + // Row data is returned in format specified during session creation. + oneof rows { + // Serialized row data in AVRO format. + AvroRows avro_rows = 3; + + // Serialized row data in Arrow RecordBatch format. + ArrowRecordBatch arrow_record_batch = 4; + } + + // Number of serialized rows in the rows block. This value is recorded here, + // in addition to the row_count values in the output-specific messages in + // `rows`, so that code which needs to record progress through the stream can + // do so in an output format-independent way. + int64 row_count = 6; + + // Estimated stream statistics. + StreamStatus status = 2; + + // Throttling status. 
+ +// Response from calling `ReadRows` may include row data, progress and +// throttling information. +message ReadRowsResponse { + // Row data is returned in the format specified during session creation. + oneof rows { + // Serialized row data in AVRO format. + AvroRows avro_rows = 3; + + // Serialized row data in Arrow RecordBatch format. + ArrowRecordBatch arrow_record_batch = 4; + } + + // Number of serialized rows in the rows block. This value is recorded here, + // in addition to the row_count values in the output-specific messages in + // `rows`, so that code which needs to record progress through the stream can + // do so in an output format-independent way. + int64 row_count = 6; + + // Estimated stream statistics. + StreamStatus status = 2; + + // Throttling status. If unset, the latest response still describes + // the current throttling status. + ThrottleStatus throttle_status = 5; + + // The schema for the read. If read_options.selected_fields is set, the + // schema may be different from the table schema as it will only contain + // the selected fields. This schema is equivalent to the one returned by + // CreateSession. This field is only populated in the first ReadRowsResponse + // RPC. + oneof schema { + // Output only. Avro schema. + AvroSchema avro_schema = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Arrow schema. + ArrowSchema arrow_schema = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + } +} + +// Information needed to request additional streams for an established read +// session. +message BatchCreateReadSessionStreamsRequest { + // Required. Must be a non-expired session obtained from a call to + // CreateReadSession. Only the name field needs to be set. + ReadSession session = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Number of new streams requested. Must be positive. + // The number of added streams may be less than this; see + // CreateReadSessionRequest for more information. + int32 requested_streams = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The response from `BatchCreateReadSessionStreams` contains the stream +// identifiers for the newly created streams. +message BatchCreateReadSessionStreamsResponse { + // Newly added streams. + repeated Stream streams = 1; +} + +// Request information for invoking `FinalizeStream`. +message FinalizeStreamRequest { + // Required. Stream to finalize. + Stream stream = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request information for `SplitReadStream`. +message SplitReadStreamRequest { + // Required. Stream to split. + Stream original_stream = 1 [(google.api.field_behavior) = REQUIRED]; + + // A value in the range (0.0, 1.0) that specifies the fractional point at + // which the original stream should be split. The actual split point is + // evaluated on pre-filtered rows, so if a filter is provided, then there is + // no guarantee that the division of the rows between the new child streams + // will be proportional to this fractional value. Additionally, because the + // server-side unit for assigning data is collections of rows, this fraction + // will always map to a data storage boundary on the server side. + float fraction = 2; +} + +// Response from `SplitReadStream`. +message SplitReadStreamResponse { + // Primary stream, which contains the beginning portion of + // |original_stream|. An empty value indicates that the original stream can no + // longer be split. + Stream primary_stream = 1; + + // Remainder stream, which contains the tail of |original_stream|. An empty + // value indicates that the original stream can no longer be split. + Stream remainder_stream = 2; +}
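As the `row_count` comment notes, progress tracking can stay format-agnostic. A hedged sketch of such a loop follows; `BigQueryStorageClient` and `readRowsCallable()` are the v1beta1 client surface published by this library, but verify them against your version.

```java
import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient;
import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsRequest;
import com.google.cloud.bigquery.storage.v1beta1.Storage.ReadRowsResponse;

public class RowCountTracker {
  /** Drains one stream and returns the total rows seen, independent of data format. */
  public static long countRows(BigQueryStorageClient client, ReadRowsRequest request) {
    long totalRows = 0;
    for (ReadRowsResponse response : client.readRowsCallable().call(request)) {
      totalRows += response.getRowCount(); // format-independent per-response row count
      // Note: response.getStatus().getFractionConsumed() is only populated for
      // BALANCED sessions, per the StreamStatus comment above.
    }
    return totalRows;
  }
}
```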
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/table_reference.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/table_reference.proto new file mode 100644 index 000000000000..99cd5d099c56 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/table_reference.proto @@ -0,0 +1,41 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +import "google/protobuf/timestamp.proto"; + +option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta1/storagepb;storagepb"; +option java_outer_classname = "TableReferenceProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Table reference that includes just the 3 strings needed to identify a table. +message TableReference { + // The assigned project ID of the project. + string project_id = 1; + + // The ID of the dataset in the above project. + string dataset_id = 2; + + // The ID of the table in the above dataset. + string table_id = 3; +} + +// All fields in this message are optional. +message TableModifiers { + // The snapshot time of the table. If not set, interpreted as now. + google.protobuf.Timestamp snapshot_time = 1; +}
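A short sketch pairing the two messages above for a snapshot read. The `TableReferenceProto` nesting follows the `java_outer_classname` option in this file; `Timestamps` comes from protobuf-java-util, which is an additional dependency, and all identifiers are hypothetical.

```java
import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableModifiers;
import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference;
import com.google.protobuf.util.Timestamps;

public class SnapshotReadSketch {
  public static void main(String[] args) {
    TableReference table =
        TableReference.newBuilder()
            .setProjectId("my-project")
            .setDatasetId("my_dataset")
            .setTableId("my_table")
            .build();
    // Read the table as of one hour ago; leaving snapshot_time unset means "now".
    TableModifiers modifiers =
        TableModifiers.newBuilder()
            .setSnapshotTime(Timestamps.fromMillis(System.currentTimeMillis() - 3_600_000L))
            .build();
    System.out.println(table);
    System.out.println(modifiers);
  }
}
```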
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml new file mode 100644 index 000000000000..cbde88cdaa19 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml @@ -0,0 +1,81 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- see http://www.mojohaus.org/clirr-maven-plugin/examples/ignored-differences.html --> +<differences> + <difference> + <differenceType>7012</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/*OrBuilder</className> + <method>* get*(*)</method> + </difference> + <difference> + <differenceType>7012</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/*OrBuilder</className> + <method>boolean contains*(*)</method> + </difference> + <difference> + <differenceType>7012</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/*OrBuilder</className> + <method>boolean has*(*)</method> + </difference> + + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* getDefaultInstanceForType()</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* addRepeatedField(*)</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* clear()</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* clearField(*)</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* clearOneof(*)</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* clone()</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* mergeUnknownFields(*)</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* setField(*)</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* setRepeatedField(*)</method> + <to>**</to> + </difference> + <difference> + <differenceType>7006</differenceType> + <className>com/google/cloud/bigquery/storage/v1beta2/**</className> + <method>* setUnknownFields(*)</method> + <to>**</to> + </difference> +</differences> diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/pom.xml new file mode 100644 index 000000000000..12b8092dc452 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -0,0 +1,42 @@ +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + <groupId>com.google.api.grpc</groupId> + <artifactId>proto-google-cloud-bigquerystorage-v1beta2</artifactId> + <version>0.191.1</version> + <name>proto-google-cloud-bigquerystorage-v1beta2</name> + <description>PROTO library for proto-google-cloud-bigquerystorage-v1beta2</description> + <parent> + <groupId>com.google.cloud</groupId> + <artifactId>google-cloud-bigquerystorage-parent</artifactId> + <version>3.19.1</version> + </parent> + <dependencies> + <dependency> + <groupId>com.google.protobuf</groupId> + <artifactId>protobuf-java</artifactId> + </dependency> + <dependency> + <groupId>com.google.api.grpc</groupId> + <artifactId>proto-google-common-protos</artifactId> + </dependency> + <dependency> + <groupId>com.google.api</groupId> + <artifactId>api-common</artifactId> + </dependency> + <dependency> + <groupId>com.google.guava</groupId> + <artifactId>guava</artifactId> + </dependency> + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>flatten-maven-plugin</artifactId> + </plugin> + </plugins> + </build> +</project> diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java new file mode 100644 index 000000000000..fb1fc584c43d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java @@ -0,0 +1,2734 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `AppendRows`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsRequest} + */ +public final class AppendRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) + AppendRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AppendRowsRequest.newBuilder() to construct. + private AppendRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsRequest() { + writeStream_ = ""; + traceId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.Builder.class); + } + + public interface ProtoDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Proto schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + boolean hasWriterSchema(); + + /** + * + * + *
+     * Proto schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema getWriterSchema(); + + /** + * + * + *
+     * Proto schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder getWriterSchemaOrBuilder(); + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + boolean hasRows(); + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + * + * @return The rows. + */ + com.google.cloud.bigquery.storage.v1beta2.ProtoRows getRows(); + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder getRowsOrBuilder(); + } + + /** + * + * + *
+   * Proto schema and data.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData} + */ + public static final class ProtoData extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) + ProtoDataOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ProtoData.newBuilder() to construct. + private ProtoData(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoData() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoData(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder.class); + } + + private int bitField0_; + public static final int WRITER_SCHEMA_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta2.ProtoSchema writerSchema_; + + /** + * + * + *
+     * Proto schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + @java.lang.Override + public boolean hasWriterSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Proto schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema getWriterSchema() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + + /** + * + * + *
+     * Proto schema used to serialize the data.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder + getWriterSchemaOrBuilder() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + + public static final int ROWS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.ProtoRows rows_; + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + @java.lang.Override + public boolean hasRows() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + * + * @return The rows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows getRows() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance() + : rows_; + } + + /** + * + * + *
+     * Serialized row data in protobuf message format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder getRowsOrBuilder() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance() + : rows_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getWriterSchema()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getRows()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriterSchema()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRows()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData other = + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) obj; + + if (hasWriterSchema() != other.hasWriterSchema()) return false; + if (hasWriterSchema()) { + if (!getWriterSchema().equals(other.getWriterSchema())) return false; + } + if (hasRows() != other.hasRows()) return false; + if (hasRows()) { + if (!getRows().equals(other.getRows())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriterSchema()) { + hash = (37 * hash) + WRITER_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getWriterSchema().hashCode(); + } + if (hasRows()) { + hash = (37 * hash) + ROWS_FIELD_NUMBER; + hash = (53 * hash) + getRows().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Proto schema and data.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getWriterSchemaFieldBuilder(); + getRowsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writerSchema_ = null; + if (writerSchemaBuilder_ != null) { + writerSchemaBuilder_.dispose(); + writerSchemaBuilder_ = null; + } + rows_ = null; + if (rowsBuilder_ != null) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData build() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData result = + new com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writerSchema_ = + writerSchemaBuilder_ == null ? 
writerSchema_ : writerSchemaBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rows_ = rowsBuilder_ == null ? rows_ : rowsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance()) return this; + if (other.hasWriterSchema()) { + mergeWriterSchema(other.getWriterSchema()); + } + if (other.hasRows()) { + mergeRows(other.getRows()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getWriterSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getRowsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta2.ProtoSchema writerSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema, + 
com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder> + writerSchemaBuilder_; + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + public boolean hasWriterSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema getWriterSchema() { + if (writerSchemaBuilder_ == null) { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance() + : writerSchema_; + } else { + return writerSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1beta2.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writerSchema_ = value; + } else { + writerSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema( + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder builderForValue) { + if (writerSchemaBuilder_ == null) { + writerSchema_ = builderForValue.build(); + } else { + writerSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + public Builder mergeWriterSchema( + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && writerSchema_ != null + && writerSchema_ + != com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance()) { + getWriterSchemaBuilder().mergeFrom(value); + } else { + writerSchema_ = value; + } + } else { + writerSchemaBuilder_.mergeFrom(value); + } + if (writerSchema_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + public Builder clearWriterSchema() { + bitField0_ = (bitField0_ & ~0x00000001); + writerSchema_ = null; + if (writerSchemaBuilder_ != null) { + writerSchemaBuilder_.dispose(); + writerSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder + getWriterSchemaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getWriterSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder + getWriterSchemaOrBuilder() { + if (writerSchemaBuilder_ != null) { + return writerSchemaBuilder_.getMessageOrBuilder(); + } else { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance() + : writerSchema_; + } + } + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoSchema writer_schema = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder> + getWriterSchemaFieldBuilder() { + if (writerSchemaBuilder_ == null) { + writerSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder>( + getWriterSchema(), getParentForChildren(), isClean()); + writerSchema_ = null; + } + return writerSchemaBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ProtoRows rows_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ProtoRows, + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder> + rowsBuilder_; + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + public boolean hasRows() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + * + * @return The rows. + */ + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows getRows() { + if (rowsBuilder_ == null) { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance() + : rows_; + } else { + return rowsBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + public Builder setRows(com.google.cloud.bigquery.storage.v1beta2.ProtoRows value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + } else { + rowsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + public Builder setRows( + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder builderForValue) { + if (rowsBuilder_ == null) { + rows_ = builderForValue.build(); + } else { + rowsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + public Builder mergeRows(com.google.cloud.bigquery.storage.v1beta2.ProtoRows value) { + if (rowsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && rows_ != null + && rows_ + != com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance()) { + getRowsBuilder().mergeFrom(value); + } else { + rows_ = value; + } + } else { + rowsBuilder_.mergeFrom(value); + } + if (rows_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + public Builder clearRows() { + bitField0_ = (bitField0_ & ~0x00000002); + rows_ = null; + if (rowsBuilder_ != null) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder getRowsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder getRowsOrBuilder() { + if (rowsBuilder_ != null) { + return rowsBuilder_.getMessageOrBuilder(); + } else { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance() + : rows_; + } + } + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ProtoRows rows = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ProtoRows, + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder> + getRowsFieldBuilder() { + if (rowsBuilder_ == null) { + rowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ProtoRows, + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder>( + getRows(), getParentForChildren(), isClean()); + rows_ = null; + } + return rowsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) + private static final com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int rowsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rows_; + + public enum RowsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PROTO_ROWS(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 4: + return PROTO_ROWS; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object writeStream_ = ""; + + /** + * + * + *
+   * Required. The stream that is the target of the append operation. This value
+   * must be specified for the initial request. If subsequent requests specify
+   * the stream name, it must equal the value provided in the first request.
+   * To write to the _default stream, populate this field with a string in the
+   * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
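The `_default`-stream naming rule described above is the detail callers most often get wrong, so here is a hedged sketch of a first `AppendRows` request against it. Project, dataset, and table names are hypothetical; the builder methods follow the generated code in this file.

```java
import com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest;
import com.google.cloud.bigquery.storage.v1beta2.ProtoRows;
import com.google.cloud.bigquery.storage.v1beta2.ProtoSchema;

public class DefaultStreamRequestSketch {
  /** Builds the first request of a connection writing to the _default stream. */
  public static AppendRowsRequest firstRequest(ProtoSchema writerSchema, ProtoRows rows) {
    String defaultStream =
        "projects/my-project/datasets/my_dataset/tables/my_table/_default";
    return AppendRowsRequest.newBuilder()
        .setWriteStream(defaultStream) // required on the initial request
        .setProtoRows(
            AppendRowsRequest.ProtoData.newBuilder()
                .setWriterSchema(writerSchema) // schema accompanies the first payload
                .setRows(rows))
        .setTraceId("my-app/1.0") // optional; only the initial request's value is kept
        .build();
  }
}
```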
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The stream that is the target of the append operation. This value
+   * must be specified for the initial request. If subsequent requests specify
+   * the stream name, it must equal the value provided in the first request.
+   * To write to the _default stream, populate this field with a string in the
+   * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + public static final int PROTO_ROWS_FIELD_NUMBER = 4; + + /** + * + * + *
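A sketch of how `offset` can be used for exactly-once appends on an explicitly created stream: the write only succeeds if the server's next append offset matches the provided value, so a retried request cannot double-append. Per the comment above, `offset` must stay unset for the `_default` stream.

```java
import com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest;
import com.google.protobuf.Int64Value;

public class OffsetSketch {
  /** Stamps a request with the row index at which this batch must begin. */
  public static AppendRowsRequest withOffset(AppendRowsRequest template, long nextRowOffset) {
    return template.toBuilder()
        .setOffset(Int64Value.of(nextRowOffset)) // rejected unless it matches the stream end
        .build();
  }
}
```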
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + @java.lang.Override + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData getProtoRows() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + public static final int TRACE_ID_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object traceId_ = ""; + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting on the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + @java.lang.Override + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } + } + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting on the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoRows()) { + if (!getProtoRows().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getOffset()); + } + if (rowsCase_ == 4) { + output.writeMessage( + 4, (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, traceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(traceId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, traceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest other = + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getTraceId().equals(other.getTraceId())) return false; + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 4: + if (!getProtoRows().equals(other.getProtoRows())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; + hash = (53 * hash) + getTraceId().hashCode(); + switch (rowsCase_) { + case 4: + hash = (37 * hash) + PROTO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getProtoRows().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `AppendRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOffsetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writeStream_ = ""; + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + if (protoRowsBuilder_ != null) { + protoRowsBuilder_.clear(); + } + traceId_ = ""; + rowsCase_ = 0; + rows_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest build() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest result = + new com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writeStream_ = writeStream_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offsetBuilder_ == null ? 
offset_ : offsetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.traceId_ = traceId_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest result) { + result.rowsCase_ = rowsCase_; + result.rows_ = this.rows_; + if (rowsCase_ == 4 && protoRowsBuilder_ != null) { + result.rows_ = protoRowsBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.getDefaultInstance()) + return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + if (!other.getTraceId().isEmpty()) { + traceId_ = other.traceId_; + bitField0_ |= 0x00000008; + onChanged(); + } + switch (other.getRowsCase()) { + case PROTO_ROWS: + { + mergeProtoRows(other.getProtoRows()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoRows()) { + if (!getProtoRows().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + writeStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getOffsetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 34: + { + input.readMessage(getProtoRowsFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 4; + break; + } // case 34 + case 50: + { + traceId_ = input.readStringRequireUtf8(); + bitField0_ |= 
0x00000008; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object writeStream_ = ""; + + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value
+     * must be specified for the initial request. If subsequent requests specify
+     * the stream name, it must equal the value provided in the first request.
+     * To write to the _default stream, populate this field with a string in the
+     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
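+     *
+     * <p>Sketch, assuming {@code builder} is an {@code AppendRowsRequest.Builder}
+     * and the project, dataset, and table names are placeholders:
+     *
+     * <pre>{@code
+     * builder.setWriteStream(
+     *     "projects/my-project/datasets/my_dataset/tables/my_table/_default");
+     * }</pre>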
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value
+     * must be specified for the initial request. If subsequent requests specify
+     * the stream name, it must equal the value provided in the first request.
+     * To write to the _default stream, populate this field with a string in the
+     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value
+     * must be specified for the initial request. If subsequent requests specify
+     * the stream name, it must equal the value provided in the first request.
+     * To write to the _default stream, populate this field with a string in the
+     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value
+     * must be specified for the initial request. If subsequent requests specify
+     * the stream name, it must equal the value provided in the first request.
+     * To write to the _default stream, populate this field with a string in the
+     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + writeStream_ = getDefaultInstance().getWriteStream(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value
+     * must be specified for the initial request. If subsequent requests specify
+     * the stream name, it must equal the value provided in the first request.
+     * To write to the _default stream, populate this field with a string in the
+     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+     * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
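+     *
+     * <p>Sketch of an exact-offset append, assuming {@code builder} is an
+     * {@code AppendRowsRequest.Builder} and {@code 42L} is the expected next
+     * offset:
+     *
+     * <pre>{@code
+     * builder.setOffset(com.google.protobuf.Int64Value.of(42L));
+     * }</pre>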
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + } else { + offsetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && offset_ != null + && offset_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getOffsetBuilder().mergeFrom(value); + } else { + offset_ = value; + } + } else { + offsetBuilder_.mergeFrom(value); + } + if (offset_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + + /** + * + * + *
+     * If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder> + protoRowsBuilder_; + + /** + * + * + *
+     * Rows in proto format.
+     * 
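+     *
+     * <p>Sketch of supplying rows, assuming {@code builder} is an
+     * {@code AppendRowsRequest.Builder} and that {@code protoSchema} and
+     * {@code rowBytes} (a serialized row as a {@code ByteString}) were built
+     * elsewhere:
+     *
+     * <pre>{@code
+     * builder.setProtoRows(
+     *     AppendRowsRequest.ProtoData.newBuilder()
+     *         .setWriterSchema(protoSchema)
+     *         .setRows(ProtoRows.newBuilder().addSerializedRows(rowBytes).build())
+     *         .build());
+     * }</pre>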
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return Whether the protoRows field is set. + */ + @java.lang.Override + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return The protoRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData getProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return protoRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + protoRowsBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder + builderForValue) { + if (protoRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + protoRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder mergeProtoRows( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + protoRowsBuilder_.mergeFrom(value); + } else { + protoRowsBuilder_.setMessage(value); + } + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder clearProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + protoRowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder + getProtoRowsBuilder() { + return getProtoRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if ((rowsCase_ == 4) && (protoRowsBuilder_ != null)) { + return protoRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Rows in proto format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder> + getProtoRowsFieldBuilder() { + if (protoRowsBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + protoRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData.Builder, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + return protoRowsBuilder_; + } + + private java.lang.Object traceId_ = ""; + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the setting in the
+     * initial request is respected.
+     * 
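+     *
+     * <p>Sketch (the identifier is an arbitrary example):
+     *
+     * <pre>{@code
+     * builder.setTraceId("my-writer:1.0");
+     * }</pre>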
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + public java.lang.String getTraceId() { + java.lang.Object ref = traceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + traceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the setting in the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + public com.google.protobuf.ByteString getTraceIdBytes() { + java.lang.Object ref = traceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + traceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the setting in the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @param value The traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + traceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the setting in the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @return This builder for chaining. + */ + public Builder clearTraceId() { + traceId_ = getDefaultInstance().getTraceId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
+     * ID set by the client to annotate its identity. Only the setting in the
+     * initial request is respected.
+     * 
+ * + * string trace_id = 6; + * + * @param value The bytes for traceId to set. + * @return This builder for chaining. + */ + public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + traceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java new file mode 100644 index 000000000000..059bc6bad638 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java @@ -0,0 +1,178 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface AppendRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AppendRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The stream that is the target of the append operation. This value
+   * must be specified for the initial request. If subsequent requests specify
+   * the stream name, it must equal the value provided in the first request.
+   * To write to the _default stream, populate this field with a string in the
+   * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + + /** + * + * + *
+   * Required. The stream that is the target of the append operation. This value
+   * must be specified for the initial request. If subsequent requests specify
+   * the stream name, it must equal the value provided in the first request.
+   * To write to the _default stream, populate this field with a string in the
+   * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+   * 
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + + /** + * + * + *
+   * If present, the write is only performed if the next append offset is the same
+   * as the provided value. If not present, the write is performed at the
+   * current end of stream. Specifying a value for this field is not allowed
+   * when calling AppendRows for the '_default' stream.
+   * 
+ * + * .google.protobuf.Int64Value offset = 2; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return Whether the protoRows field is set. + */ + boolean hasProtoRows(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * @return The protoRows. + */ + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData getProtoRows(); + + /** + * + * + *
+   * Rows in proto format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoData proto_rows = 4; + */ + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder(); + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting in the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The traceId. + */ + java.lang.String getTraceId(); + + /** + * + * + *
+   * ID set by the client to annotate its identity. Only the setting in the
+   * initial request is respected.
+   * 
+ * + * string trace_id = 6; + * + * @return The bytes for traceId. + */ + com.google.protobuf.ByteString getTraceIdBytes(); + + com.google.cloud.bigquery.storage.v1beta2.AppendRowsRequest.RowsCase getRowsCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java new file mode 100644 index 000000000000..0057168f62eb --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java @@ -0,0 +1,2524 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Response message for `AppendRows`.
+ * 
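+ *
+ * <p>A sketch of inspecting the {@code response} oneof, assuming
+ * {@code response} was returned by an {@code AppendRows} call:
+ *
+ * <pre>{@code
+ * if (response.hasAppendResult()) {
+ *   // The offset is only populated for offset-based appends.
+ *   if (response.getAppendResult().hasOffset()) {
+ *     long offset = response.getAppendResult().getOffset().getValue();
+ *   }
+ * } else if (response.hasError()) {
+ *   com.google.rpc.Status error = response.getError();
+ * }
+ * }</pre>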
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsResponse} + */ +public final class AppendRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) + AppendRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AppendRowsResponse.newBuilder() to construct. + private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.Builder.class); + } + + public interface AppendResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using default streams.
+     * 
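+     *
+     * <p>Sketch of reading the offset when present, assuming {@code result} is an
+     * {@code AppendResult}:
+     *
+     * <pre>{@code
+     * if (result.hasOffset()) {
+     *   long lastOffset = result.getOffset().getValue();
+     * }
+     * }</pre>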
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + } + + /** + * + * + *
+   * AppendResult is returned for successful append requests.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult} + */ + public static final class AppendResult extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + AppendResultOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AppendResult.newBuilder() to construct. + private AppendResult(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendResult() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendResult(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder + .class); + } + + private int bitField0_; + public static final int OFFSET_FIELD_NUMBER = 1; + private com.google.protobuf.Int64Value offset_; + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + /** + * + * + *
+     * The row offset at which the last append occurred. The offset will not be
+     * set if appending using default streams.
+     * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getOffset()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getOffset()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult other = + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) obj; + + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * AppendResult is returned for successful append requests.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOffsetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult build() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult result = + new com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.offset_ = offsetBuilder_ == null ? 
offset_ : offsetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance()) return this; + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getOffsetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + } else { + offsetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && offset_ != null + && offset_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getOffsetBuilder().mergeFrom(value); + } else { + offset_ = value; + } + } else { + offsetBuilder_.mergeFrom(value); + } + if (offset_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000001); + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + + /** + * + * + *
+       * The row offset at which the last append occurred. The offset will not be
+       * set if appending using default streams.
+       * 
+ * + * .google.protobuf.Int64Value offset = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + private static final com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int responseCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object response_; + + public enum ResponseCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + APPEND_RESULT(1), + ERROR(2), + RESPONSE_NOT_SET(0); + private final int value; + + private ResponseCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static ResponseCase valueOf(int value) { + return forNumber(value); + } + + public static ResponseCase forNumber(int value) { + switch (value) { + case 1: + return APPEND_RESULT; + case 2: + return ERROR; + case 0: + return RESPONSE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public static final int APPEND_RESULT_FIELD_NUMBER = 1; + + /** + * + * + *
+   * Result if the append is successful.
+   * 
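+   *
+   * <p>Sketch of dispatching on the {@code response} oneof (illustrative;
+   * {@code response} is assumed to be a received {@code AppendRowsResponse}).
+   * Note that {@code getAppendResult()} returns the default instance when the
+   * oneof holds something else, so check the case first:
+   *
+   * <pre>{@code
+   * switch (response.getResponseCase()) {
+   *   case APPEND_RESULT:
+   *     // success; the committed offset may be present
+   *     break;
+   *   case ERROR:
+   *     // inspect response.getError()
+   *     break;
+   *   case RESPONSE_NOT_SET:
+   *   default:
+   *     break;
+   * }
+   * }</pre>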
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + @java.lang.Override + public boolean hasAppendResult() { + return responseCase_ == 1; + } + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + getAppendResult() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder() { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) response_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + + public static final int ERROR_FIELD_NUMBER = 2; + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specifies an offset and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can safely be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried once the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
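+   *
+   * <p>An illustrative handling sketch for the codes above. It assumes
+   * {@code response} was received on the stream and that gRPC's
+   * {@code io.grpc.Status} is on the classpath; the numeric code in
+   * {@code google.rpc.Status} follows {@code google.rpc.Code}:
+   *
+   * <pre>{@code
+   * if (response.hasError()) {
+   *   com.google.rpc.Status status = response.getError();
+   *   io.grpc.Status.Code code = io.grpc.Status.fromCodeValue(status.getCode()).getCode();
+   *   if (code == io.grpc.Status.Code.ALREADY_EXISTS) {
+   *     // rows at this offset were already committed; safe to ignore
+   *   } else if (code == io.grpc.Status.Code.ABORTED || code == io.grpc.Status.Code.INTERNAL) {
+   *     // retryable; resend once the prior failure is addressed
+   *   } else {
+   *     // e.g. INVALID_ARGUMENT or OUT_OF_RANGE: surface to the caller
+   *   }
+   * }
+   * }</pre>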
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + @java.lang.Override + public boolean hasError() { + return responseCase_ == 2; + } + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specifies an offset and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can safely be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried once the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + @java.lang.Override + public com.google.rpc.Status getError() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specifies an offset and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can safely be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried once the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + + public static final int UPDATED_SCHEMA_FIELD_NUMBER = 3; + private com.google.cloud.bigquery.storage.v1beta2.TableSchema updatedSchema_; + + /** + * + * + *
+   * If the backend detects a schema update, it passes the new schema back to
+   * the user, who can use it to construct messages of the updated type. It
+   * will be empty when no schema updates have occurred.
+   * 
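+   *
+   * <p>Illustrative sketch (assuming {@code response} was just received):
+   *
+   * <pre>{@code
+   * if (response.hasUpdatedSchema()) {
+   *   TableSchema newSchema = response.getUpdatedSchema();
+   *   // rebuild the writer's serializer/descriptors from newSchema
+   * }
+   * }</pre>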
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + @java.lang.Override + public boolean hasUpdatedSchema() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * If the backend detects a schema update, it passes the new schema back to
+   * the user, who can use it to construct messages of the updated type. It
+   * will be empty when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchema getUpdatedSchema() { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : updatedSchema_; + } + + /** + * + * + *
+   * If the backend detects a schema update, it passes the new schema back to
+   * the user, who can use it to construct messages of the updated type. It
+   * will be empty when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder + getUpdatedSchemaOrBuilder() { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : updatedSchema_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (responseCase_ == 1) { + output.writeMessage( + 1, (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) response_); + } + if (responseCase_ == 2) { + output.writeMessage(2, (com.google.rpc.Status) response_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getUpdatedSchema()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (responseCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + response_); + } + if (responseCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.rpc.Status) response_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdatedSchema()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse other = + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) obj; + + if (hasUpdatedSchema() != other.hasUpdatedSchema()) return false; + if (hasUpdatedSchema()) { + if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false; + } + if (!getResponseCase().equals(other.getResponseCase())) return false; + switch (responseCase_) { + case 1: + if (!getAppendResult().equals(other.getAppendResult())) return false; + break; + case 2: + if (!getError().equals(other.getError())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUpdatedSchema()) { + hash = (37 * hash) + UPDATED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUpdatedSchema().hashCode(); + } + switch (responseCase_) { + case 1: + hash = (37 * hash) + APPEND_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getAppendResult().hashCode(); + break; + case 2: + hash = (37 * hash) + ERROR_FIELD_NUMBER; + hash = (53 * hash) + getError().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse 
parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + 
} + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `AppendRows`.
+   * 
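+   *
+   * <p>Construction sketch, e.g. for a test fixture (illustrative only).
+   * Because {@code append_result} and {@code error} share a oneof, setting one
+   * clears the other:
+   *
+   * <pre>{@code
+   * AppendRowsResponse response =
+   *     AppendRowsResponse.newBuilder()
+   *         .setAppendResult(
+   *             AppendRowsResponse.AppendResult.newBuilder()
+   *                 .setOffset(com.google.protobuf.Int64Value.of(42)))
+   *         .build();
+   * }</pre>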
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getUpdatedSchemaFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (appendResultBuilder_ != null) { + appendResultBuilder_.clear(); + } + if (errorBuilder_ != null) { + errorBuilder_.clear(); + } + updatedSchema_ = null; + if (updatedSchemaBuilder_ != null) { + updatedSchemaBuilder_.dispose(); + updatedSchemaBuilder_ = null; + } + responseCase_ = 0; + response_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse build() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse result = + new com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.updatedSchema_ = + updatedSchemaBuilder_ == null ? 
updatedSchema_ : updatedSchemaBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse result) { + result.responseCase_ = responseCase_; + result.response_ = this.response_; + if (responseCase_ == 1 && appendResultBuilder_ != null) { + result.response_ = appendResultBuilder_.build(); + } + if (responseCase_ == 2 && errorBuilder_ != null) { + result.response_ = errorBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.getDefaultInstance()) + return this; + if (other.hasUpdatedSchema()) { + mergeUpdatedSchema(other.getUpdatedSchema()); + } + switch (other.getResponseCase()) { + case APPEND_RESULT: + { + mergeAppendResult(other.getAppendResult()); + break; + } + case ERROR: + { + mergeError(other.getError()); + break; + } + case RESPONSE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getAppendResultFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage(getErrorFieldBuilder().getBuilder(), extensionRegistry); + responseCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage(getUpdatedSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public Builder clearResponse() { + responseCase_ = 0; + response_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder> + appendResultBuilder_; + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + @java.lang.Override + public boolean hasAppendResult() { + return responseCase_ == 1; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + getAppendResult() { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + response_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } else { + if (responseCase_ == 1) { + return appendResultBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder setAppendResult( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult value) { + if (appendResultBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + appendResultBuilder_.setMessage(value); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder setAppendResult( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder + builderForValue) { + if (appendResultBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + appendResultBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder mergeAppendResult( + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult value) { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1 + && response_ + != com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance()) { + response_ = + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 1) { + appendResultBuilder_.mergeFrom(value); + } else { + appendResultBuilder_.setMessage(value); + } + } + responseCase_ = 1; + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public Builder clearAppendResult() { + if (appendResultBuilder_ == null) { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + } + appendResultBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder + getAppendResultBuilder() { + return getAppendResultFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder() { + if ((responseCase_ == 1) && (appendResultBuilder_ != null)) { + return appendResultBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 1) { + return (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + response_; + } + return com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + } + + /** + * + * + *
+     * Result if the append is successful.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder> + getAppendResultFieldBuilder() { + if (appendResultBuilder_ == null) { + if (!(responseCase_ == 1)) { + response_ = + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult + .getDefaultInstance(); + } + appendResultBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult.Builder, + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult) + response_, + getParentForChildren(), + isClean()); + response_ = null; + } + responseCase_ = 1; + onChanged(); + return appendResultBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + errorBuilder_; + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + @java.lang.Override + public boolean hasError() { + return responseCase_ == 2; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + @java.lang.Override + public com.google.rpc.Status getError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } else { + if (responseCase_ == 2) { + return errorBuilder_.getMessage(); + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + errorBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status.Builder builderForValue) { + if (errorBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + errorBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder mergeError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (responseCase_ == 2 && response_ != com.google.rpc.Status.getDefaultInstance()) { + response_ = + com.google.rpc.Status.newBuilder((com.google.rpc.Status) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 2) { + errorBuilder_.mergeFrom(value); + } else { + errorBuilder_.setMessage(value); + } + } + responseCase_ = 2; + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder clearError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + } + errorBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public com.google.rpc.Status.Builder getErrorBuilder() { + return getErrorFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if ((responseCase_ == 2) && (errorBuilder_ != null)) { + return errorBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     *
+     * Additional information about error signalling:
+     *
+     * ALREADY_EXISTS: Happens when an append specifies an offset and the
+     * backend has already received data at this offset.  Typically encountered
+     * in retry scenarios, and can safely be ignored.
+     *
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     *
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     *
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried once the previous failure is addressed.
+     *
+     * INTERNAL: Indicates server-side error(s) that can be retried.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + getErrorFieldBuilder() { + if (errorBuilder_ == null) { + if (!(responseCase_ == 2)) { + response_ = com.google.rpc.Status.getDefaultInstance(); + } + errorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>( + (com.google.rpc.Status) response_, getParentForChildren(), isClean()); + response_ = null; + } + responseCase_ = 2; + onChanged(); + return errorBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.TableSchema updatedSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableSchema, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder> + updatedSchemaBuilder_; + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + public boolean hasUpdatedSchema() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.TableSchema getUpdatedSchema() { + if (updatedSchemaBuilder_ == null) { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : updatedSchema_; + } else { + return updatedSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + public Builder setUpdatedSchema(com.google.cloud.bigquery.storage.v1beta2.TableSchema value) { + if (updatedSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updatedSchema_ = value; + } else { + updatedSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + public Builder setUpdatedSchema( + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder builderForValue) { + if (updatedSchemaBuilder_ == null) { + updatedSchema_ = builderForValue.build(); + } else { + updatedSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + public Builder mergeUpdatedSchema(com.google.cloud.bigquery.storage.v1beta2.TableSchema value) { + if (updatedSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && updatedSchema_ != null + && updatedSchema_ + != com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance()) { + getUpdatedSchemaBuilder().mergeFrom(value); + } else { + updatedSchema_ = value; + } + } else { + updatedSchemaBuilder_.mergeFrom(value); + } + if (updatedSchema_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + public Builder clearUpdatedSchema() { + bitField0_ = (bitField0_ & ~0x00000004); + updatedSchema_ = null; + if (updatedSchemaBuilder_ != null) { + updatedSchemaBuilder_.dispose(); + updatedSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder getUpdatedSchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getUpdatedSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder + getUpdatedSchemaOrBuilder() { + if (updatedSchemaBuilder_ != null) { + return updatedSchemaBuilder_.getMessageOrBuilder(); + } else { + return updatedSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : updatedSchema_; + } + } + + /** + * + * + *
+     * If the backend detects a schema update, it passes the new schema back to
+     * the user, who can use it to construct messages of the updated type. It
+     * will be empty when no schema updates have occurred.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableSchema, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder> + getUpdatedSchemaFieldBuilder() { + if (updatedSchemaBuilder_ == null) { + updatedSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableSchema, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder>( + getUpdatedSchema(), getParentForChildren(), isClean()); + updatedSchema_ = null; + } + return updatedSchemaBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java new file mode 100644 index 000000000000..0d3a3d13a0f8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java @@ -0,0 +1,206 @@ +/* + * Copyright 2026 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface AppendRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AppendRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return Whether the appendResult field is set. + */ + boolean hasAppendResult(); + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + * + * @return The appendResult. + */ + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult getAppendResult(); + + /** + * + * + *
+   * Result if the append is successful.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult append_result = 1; + * + */ + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResultOrBuilder + getAppendResultOrBuilder(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specifies an offset and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can safely be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried once the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + boolean hasError(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specifies an offset and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can safely be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried once the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + com.google.rpc.Status getError(); + + /** + * + * + *
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   *
+   * Additional information about error signalling:
+   *
+   * ALREADY_EXISTS: Happens when an append specifies an offset and the
+   * backend has already received data at this offset.  Typically encountered
+   * in retry scenarios, and can safely be ignored.
+   *
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   *
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   *
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried once the previous failure is addressed.
+   *
+   * INTERNAL: Indicates server-side error(s) that can be retried.
+   * 
+ * + * .google.rpc.Status error = 2; + */ + com.google.rpc.StatusOrBuilder getErrorOrBuilder(); + + /** + * + * + *
+   * If the backend detects a schema update, it passes the new schema back to
+   * the user, who can use it to construct messages of the updated type. It
+   * will be empty when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + * + * @return Whether the updatedSchema field is set. + */ + boolean hasUpdatedSchema(); + + /** + * + * + *
+   * If the backend detects a schema update, it passes the new schema back to
+   * the user, who can use it to construct messages of the updated type. It
+   * will be empty when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + * + * @return The updatedSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.TableSchema getUpdatedSchema(); + + /** + * + * + *
+   * If the backend detects a schema update, it passes the new schema back to
+   * the user, who can use it to construct messages of the updated type. It
+   * will be empty when no schema updates have occurred.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; + */ + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder getUpdatedSchemaOrBuilder(); + + com.google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.ResponseCase getResponseCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java new file mode 100644 index 000000000000..9198266a5d52 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public final class ArrowProto { + private ArrowProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n1google/cloud/bigquery/storage/v1beta2/" + + "arrow.proto\022%google.cloud.bigquery.stora" + + "ge.v1beta2\"(\n\013ArrowSchema\022\031\n\021serialized_" + + "schema\030\001 \001(\014\"3\n\020ArrowRecordBatch\022\037\n\027seri" + + "alized_record_batch\030\001 \001(\014\"\266\001\n\031ArrowSeria" + + "lizationOptions\022W\n\006format\030\001 \001(\0162G.google" + + ".cloud.bigquery.storage.v1beta2.ArrowSer" + + 
"ializationOptions.Format\"@\n\006Format\022\026\n\022FO" + + "RMAT_UNSPECIFIED\020\000\022\016\n\nARROW_0_14\020\001\022\016\n\nAR" + + "ROW_0_15\020\002B~\n)com.google.cloud.bigquery." + + "storage.v1beta2B\nArrowProtoP\001ZCcloud.goo" + + "gle.com/go/bigquery/storage/apiv1beta2/s" + + "toragepb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor, + new java.lang.String[] { + "SerializedSchema", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor, + new java.lang.String[] { + "SerializedRecordBatch", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_descriptor, + new java.lang.String[] { + "Format", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java new file mode 100644 index 000000000000..13f037b6e2d0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java @@ -0,0 +1,548 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Arrow RecordBatch.
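+ *
+ * Editor-added illustration, not generated proto documentation: a minimal
+ * round-trip sketch using only the protobuf-generated API in this file.
+ * The byte array ipcBytes is a hypothetical placeholder for Arrow IPC data.
+ *
+ *   ArrowRecordBatch batch =
+ *       ArrowRecordBatch.newBuilder()
+ *           .setSerializedRecordBatch(
+ *               com.google.protobuf.ByteString.copyFrom(ipcBytes))
+ *           .build();
+ *   // Parse the wire form back into a message.
+ *   ArrowRecordBatch parsed = ArrowRecordBatch.parseFrom(batch.toByteArray());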
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch} + */ +public final class ArrowRecordBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + ArrowRecordBatchOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowRecordBatch.newBuilder() to construct. + private ArrowRecordBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowRecordBatch() { + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowRecordBatch(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder.class); + } + + public static final int SERIALIZED_RECORD_BATCH_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * IPC-serialized Arrow RecordBatch.
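+   *
+   * Editor-added sketch, assuming the Apache Arrow Java library; the names
+   * proto, arrowSchema, and allocator are placeholders. The bytes can be
+   * decoded with Arrow's MessageSerializer and loaded into a
+   * VectorSchemaRoot:
+   *
+   *   // Note: Arrow ships its own class also named ArrowRecordBatch.
+   *   org.apache.arrow.vector.ipc.message.ArrowRecordBatch decoded =
+   *       MessageSerializer.deserializeRecordBatch(
+   *           new ReadChannel(Channels.newChannel(new ByteArrayInputStream(
+   *               proto.getSerializedRecordBatch().toByteArray()))),
+   *           allocator);
+   *   VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator);
+   *   new VectorLoader(root).load(decoded);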
+   * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedRecordBatch_.isEmpty()) { + output.writeBytes(1, serializedRecordBatch_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedRecordBatch_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedRecordBatch_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch other = + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) obj; + + if (!getSerializedRecordBatch().equals(other.getSerializedRecordBatch())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRecordBatch().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Arrow RecordBatch.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch build() { + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch result = + new com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedRecordBatch_ = serializedRecordBatch_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance()) + return this; + if (other.getSerializedRecordBatch() != com.google.protobuf.ByteString.EMPTY) { + setSerializedRecordBatch(other.getSerializedRecordBatch()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedRecordBatch_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @param value The serializedRecordBatch to set. + * @return This builder for chaining. + */ + public Builder setSerializedRecordBatch(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedRecordBatch_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRecordBatch() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedRecordBatch_ = getDefaultInstance().getSerializedRecordBatch(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + private static final com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowRecordBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java new file mode 100644 index 000000000000..229367f108f8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ArrowRecordBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * IPC-serialized Arrow RecordBatch.
+   * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + com.google.protobuf.ByteString getSerializedRecordBatch(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java new file mode 100644 index 000000000000..c1650c52edbc --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java @@ -0,0 +1,556 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Arrow schema as specified in
+ * https://arrow.apache.org/docs/python/api/datatypes.html
+ * and serialized to bytes using IPC:
+ * https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+ *
+ * See code samples on how this message can be deserialized.
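+ *
+ * One such sample, added editorially as a hedged sketch (assumes the Apache
+ * Arrow Java library; schemaProto stands in for an instance of this
+ * message):
+ *
+ *   org.apache.arrow.vector.types.pojo.Schema schema =
+ *       MessageSerializer.deserializeSchema(
+ *           new ReadChannel(Channels.newChannel(new ByteArrayInputStream(
+ *               schemaProto.getSerializedSchema().toByteArray()))));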
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowSchema} + */ +public final class ArrowSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + ArrowSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowSchema.newBuilder() to construct. + private ArrowSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowSchema() { + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder.class); + } + + public static final int SERIALIZED_SCHEMA_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * IPC-serialized Arrow schema.
+   * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedSchema_.isEmpty()) { + output.writeBytes(1, serializedSchema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedSchema_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema other = + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) obj; + + if (!getSerializedSchema().equals(other.getSerializedSchema())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSerializedSchema().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Arrow schema as specified in
+   * https://arrow.apache.org/docs/python/api/datatypes.html
+   * and serialized to bytes using IPC:
+   * https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+   *
+   * See code samples on how this message can be deserialized.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema build() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema result = + new com.google.cloud.bigquery.storage.v1beta2.ArrowSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedSchema_ = serializedSchema_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance()) + return this; + if (other.getSerializedSchema() != com.google.protobuf.ByteString.EMPTY) { + setSerializedSchema(other.getSerializedSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedSchema_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * IPC-serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + /** + * + * + *
+     * IPC-serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @param value The serializedSchema to set. + * @return This builder for chaining. + */ + public Builder setSerializedSchema(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedSchema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * IPC-serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedSchema() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedSchema_ = getDefaultInstance().getSerializedSchema(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.ArrowSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ArrowSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java new file mode 100644 index 000000000000..102058e3fb34 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ArrowSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * IPC-serialized Arrow schema.
+   * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + com.google.protobuf.ByteString getSerializedSchema(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java new file mode 100644 index 000000000000..46ef4009112e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java @@ -0,0 +1,793 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Contains options specific to Arrow serialization.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions} + */ +public final class ArrowSerializationOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) + ArrowSerializationOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ArrowSerializationOptions.newBuilder() to construct. + private ArrowSerializationOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowSerializationOptions() { + format_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowSerializationOptions(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder.class); + } + + /** + * + * + *
+   * The IPC format to use when serializing Arrow streams.
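+   *
+   * Editor-added sketch of selecting a format explicitly (builder API from
+   * this file):
+   *
+   *   ArrowSerializationOptions options =
+   *       ArrowSerializationOptions.newBuilder()
+   *           .setFormat(ArrowSerializationOptions.Format.ARROW_0_15)
+   *           .build();
+   *   // Leaving the field unset (FORMAT_UNSPECIFIED) also yields the 0.15
+   *   // IPC format, per the value documentation below.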
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format} + */ + public enum Format implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * If unspecified, the IPC format as of the 0.15 release will be used.
+     * 
+ * + * FORMAT_UNSPECIFIED = 0; + */ + FORMAT_UNSPECIFIED(0), + /** + * + * + *
+     * Use the legacy IPC message format as of Apache Arrow Release 0.14.
+     * 
+ * + * ARROW_0_14 = 1; + */ + ARROW_0_14(1), + /** + * + * + *
+     * Use the message format as of Apache Arrow Release 0.15.
+     * 
+ * + * ARROW_0_15 = 2; + */ + ARROW_0_15(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * If unspecified, the IPC format as of the 0.15 release will be used.
+     * 
+ * + * FORMAT_UNSPECIFIED = 0; + */ + public static final int FORMAT_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Use the legacy IPC message format as of Apache Arrow Release 0.14.
+     * 
+ * + * ARROW_0_14 = 1; + */ + public static final int ARROW_0_14_VALUE = 1; + + /** + * + * + *
+     * Use the message format as of Apache Arrow Release 0.15.
+     * 
+ * + * ARROW_0_15 = 2; + */ + public static final int ARROW_0_15_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Format valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Format forNumber(int value) { + switch (value) { + case 0: + return FORMAT_UNSPECIFIED; + case 1: + return ARROW_0_14; + case 2: + return ARROW_0_15; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Format findValueByNumber(int number) { + return Format.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Format[] VALUES = values(); + + public static Format valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Format(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format) + } + + public static final int FORMAT_FIELD_NUMBER = 1; + private int format_ = 0; + + /** + * + * + *
+   * The Arrow IPC format to use.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return The enum numeric value on the wire for format. + */ + @java.lang.Override + public int getFormatValue() { + return format_; + } + + /** + * + * + *
+   * The Arrow IPC format to use.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return The format. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format getFormat() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format result = + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format.forNumber( + format_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (format_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format + .FORMAT_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, format_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (format_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format + .FORMAT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, format_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions other = + (com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) obj; + + if (format_ != other.format_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + FORMAT_FIELD_NUMBER; + hash = (53 * hash) + format_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Contains options specific to Arrow serialization.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + format_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSerializationOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions build() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions result = + new com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.format_ = format_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + .getDefaultInstance()) return this; + if (other.format_ != 0) { + setFormatValue(other.getFormatValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + format_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int format_ = 0; + + /** + * + * + *
+     * The Arrow IPC format to use.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return The enum numeric value on the wire for format. + */ + @java.lang.Override + public int getFormatValue() { + return format_; + } + + /** + * + * + *
+     * The Arrow IPC format to use.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @param value The enum numeric value on the wire for format to set. + * @return This builder for chaining. + */ + public Builder setFormatValue(int value) { + format_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The Arrow IPC format to use.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return The format. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format getFormat() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format result = + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format.forNumber( + format_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * The Arrow IPC format to use.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @param value The format to set. + * @return This builder for chaining. + */ + public Builder setFormat( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + format_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * The Arrow IPC format to use.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return This builder for chaining. + */ + public Builder clearFormat() { + bitField0_ = (bitField0_ & ~0x00000001); + format_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) + private static final com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowSerializationOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java new file mode 100644 index 000000000000..3e018284c383 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ArrowSerializationOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The Arrow IPC format to use.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return The enum numeric value on the wire for format. + */ + int getFormatValue(); + + /** + * + * + *
+   * The Arrow IPC format to use.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format format = 1; + * + * + * @return The format. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Format getFormat(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java new file mode 100644 index 000000000000..6f65817647a5 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java @@ -0,0 +1,79 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public final class AvroProto { + private AvroProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n0google/cloud/bigquery/storage/v1beta2/" + + "avro.proto\022%google.cloud.bigquery.storag" + + "e.v1beta2\"\034\n\nAvroSchema\022\016\n\006schema\030\001 \001(\t\"" + + "*\n\010AvroRows\022\036\n\026serialized_binary_rows\030\001 " + + "\001(\014B}\n)com.google.cloud.bigquery.storage" + + ".v1beta2B\tAvroProtoP\001ZCcloud.google.com/" + + "go/bigquery/storage/apiv1beta2/storagepb" + + ";storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor, + new java.lang.String[] { + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor, + new java.lang.String[] { + "SerializedBinaryRows", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java new file mode 100644 index 000000000000..e10ede315923 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java @@ -0,0 +1,547 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Avro rows.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroRows} + */ +public final class AvroRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AvroRows) + AvroRowsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroRows.newBuilder() to construct. + private AvroRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroRows() { + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroRows(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.class, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder.class); + } + + public static final int SERIALIZED_BINARY_ROWS_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+   * Binary serialized rows in a block.
+   * </pre>
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedBinaryRows_.isEmpty()) { + output.writeBytes(1, serializedBinaryRows_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedBinaryRows_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedBinaryRows_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AvroRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AvroRows other = + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) obj; + + if (!getSerializedBinaryRows().equals(other.getSerializedBinaryRows())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_BINARY_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedBinaryRows().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.AvroRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Avro rows.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroRows} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AvroRows) + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.class, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.AvroRows.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows build() { + com.google.cloud.bigquery.storage.v1beta2.AvroRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AvroRows result = + new com.google.cloud.bigquery.storage.v1beta2.AvroRows(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.AvroRows result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.serializedBinaryRows_ = serializedBinaryRows_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.AvroRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AvroRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.AvroRows other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance()) + return this; + if (other.getSerializedBinaryRows() != com.google.protobuf.ByteString.EMPTY) { + setSerializedBinaryRows(other.getSerializedBinaryRows()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedBinaryRows_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * </pre>
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * </pre>
+ * + * bytes serialized_binary_rows = 1; + * + * @param value The serializedBinaryRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedBinaryRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + serializedBinaryRows_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Binary serialized rows in a block.
+     * </pre>
+ * + * bytes serialized_binary_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedBinaryRows() { + bitField0_ = (bitField0_ & ~0x00000001); + serializedBinaryRows_ = getDefaultInstance().getSerializedBinaryRows(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AvroRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AvroRows) + private static final com.google.cloud.bigquery.storage.v1beta2.AvroRows DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.AvroRows(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java new file mode 100644 index 000000000000..e9adfb1836ce --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface AvroRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AvroRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Binary serialized rows in a block.
+   * </pre>
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + com.google.protobuf.ByteString getSerializedBinaryRows(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java new file mode 100644 index 000000000000..66185a316107 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java @@ -0,0 +1,641 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Avro schema.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroSchema} + */ +public final class AvroSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AvroSchema) + AvroSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use AvroSchema.newBuilder() to construct. + private AvroSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroSchema() { + schema_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.class, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder.class); + } + + public static final int SCHEMA_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object schema_ = ""; + + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * </pre>
+ * + * string schema = 1; + * + * @return The schema. + */ + @java.lang.Override + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } + } + + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * </pre>
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(schema_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, schema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(schema_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, schema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AvroSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AvroSchema other = + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) obj; + + if (!getSchema().equals(other.getSchema())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.AvroSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Avro schema.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AvroSchema) + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.class, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.AvroSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + schema_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema build() { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema result = + new com.google.cloud.bigquery.storage.v1beta2.AvroSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.AvroSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.schema_ = schema_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.AvroSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AvroSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.AvroSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance()) + return this; + if (!other.getSchema().isEmpty()) { + schema_ = other.schema_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + schema_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object schema_ = ""; + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * </pre>
+ * + * string schema = 1; + * + * @return The schema. + */ + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * </pre>
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * </pre>
+ * + * string schema = 1; + * + * @param value The schema to set. + * @return This builder for chaining. + */ + public Builder setSchema(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * </pre>
+ * + * string schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSchema() { + schema_ = getDefaultInstance().getSchema(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     * </pre>
+ * + * string schema = 1; + * + * @param value The bytes for schema to set. + * @return This builder for chaining. + */ + public Builder setSchemaBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + schema_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AvroSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AvroSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.AvroSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.AvroSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java new file mode 100644 index 000000000000..2dbad8896116 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface AvroSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AvroSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * </pre>
+ * + * string schema = 1; + * + * @return The schema. + */ + java.lang.String getSchema(); + + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   * </pre>
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + com.google.protobuf.ByteString getSchemaBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java new file mode 100644 index 000000000000..a7cea1c2b969 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java @@ -0,0 +1,949 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `BatchCommitWriteStreams`.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest} + */ +public final class BatchCommitWriteStreamsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) + BatchCommitWriteStreamsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCommitWriteStreamsRequest.newBuilder() to construct. + private BatchCommitWriteStreamsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsRequest() { + parent_ = ""; + writeStreams_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAMS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList writeStreams_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
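+   * Each entry is a write stream resource name, typically of the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.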
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + return writeStreams_; + } + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < writeStreams_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, writeStreams_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + { + int dataSize = 0; + for (int i = 0; i < writeStreams_.size(); i++) { + dataSize += computeStringSizeNoTag(writeStreams_.getRaw(i)); + } + size += dataSize; + size += 1 * getWriteStreamsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest other = + (com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getWriteStreamsList().equals(other.getWriteStreamsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getWriteStreamsCount() > 0) { + hash = (37 * hash) + WRITE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getWriteStreamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + writeStreams_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest build() { + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest result = + new com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + writeStreams_.makeImmutable(); + result.writeStreams_ = writeStreams_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor 
field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.writeStreams_.isEmpty()) { + if (writeStreams_.isEmpty()) { + writeStreams_ = other.writeStreams_; + bitField0_ |= 0x00000002; + } else { + ensureWriteStreamsIsMutable(); + writeStreams_.addAll(other.writeStreams_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureWriteStreamsIsMutable(); + writeStreams_.add(s); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList writeStreams_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureWriteStreamsIsMutable() { + if (!writeStreams_.isModifiable()) { + writeStreams_ = new com.google.protobuf.LazyStringArrayList(writeStreams_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + writeStreams_.makeImmutable(); + return writeStreams_; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The writeStreams to set. + * @return This builder for chaining. + */ + public Builder setWriteStreams(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreams(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addAllWriteStreams(java.lang.Iterable values) { + ensureWriteStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, writeStreams_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteStreams() { + writeStreams_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreamsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java new file mode 100644 index 000000000000..fe6fd6f7ed6b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java @@ -0,0 +1,108 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface BatchCommitWriteStreamsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Parent table that all the streams should belong to, in the form
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + java.util.List getWriteStreamsList(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + int getWriteStreamsCount(); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + java.lang.String getWriteStreams(int index); + + /** + * + * + *
+   * Required. The group of streams that will be committed atomically.
+   * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + com.google.protobuf.ByteString getWriteStreamsBytes(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java new file mode 100644 index 000000000000..5c01de753e12 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java @@ -0,0 +1,1398 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Response message for `BatchCommitWriteStreams`.
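+ *
+ * An illustrative handling sketch (`response` is a placeholder variable):
+ *
+ *   if (response.hasCommitTime()) {
+ *     // Success: every requested stream was committed atomically.
+ *   } else {
+ *     for (StorageError e : response.getStreamErrorsList()) {
+ *       // Inspect per-stream errors; nothing was committed.
+ *     }
+ *   }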
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse} + */ +public final class BatchCommitWriteStreamsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) + BatchCommitWriteStreamsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BatchCommitWriteStreamsResponse.newBuilder() to construct. + private BatchCommitWriteStreamsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsResponse() { + streamErrors_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse.Builder + .class); + } + + private int bitField0_; + public static final int COMMIT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTime_; + + /** + * + * + *
+   * The time at which streams were committed, at microsecond granularity.
+   * This field is only set when there are no stream errors.
+   * **Note:** if this field is not set, the commit was not successful.
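+   * For illustration, the microsecond value can be derived from the returned
+   * `google.protobuf.Timestamp` as `seconds * 1_000_000 + nanos / 1_000`.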
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * The time at which streams were committed, at microsecond granularity.
+   * This field is only set when there are no stream errors.
+   * **Note:** if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + /** + * + * + *
+   * The time at which streams were committed, at microsecond granularity.
+   * This field is only set when there are no stream errors.
+   * **Note:** if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + public static final int STREAM_ERRORS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List streamErrors_; + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with errors will be
+   * in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and no streams are committed, due
+   * to the atomicity guarantee.
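+   * Illustrative check: an empty `getStreamErrorsList()` means the batch
+   * committed; otherwise each `StorageError` identifies a failing stream.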
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + @java.lang.Override + public java.util.List + getStreamErrorsList() { + return streamErrors_; + } + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with errors will be
+   * in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and no streams are committed, due
+   * to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + @java.lang.Override + public java.util.List + getStreamErrorsOrBuilderList() { + return streamErrors_; + } + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with errors will be
+   * in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and no streams are committed, due
+   * to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + @java.lang.Override + public int getStreamErrorsCount() { + return streamErrors_.size(); + } + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with errors will be
+   * in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and no streams are committed, due
+   * to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError getStreamErrors(int index) { + return streamErrors_.get(index); + } + + /** + * + * + *
+   * Stream-level error if the commit failed. Only streams with errors will be
+   * in the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, some streams have errors and no streams are committed, due
+   * to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index) { + return streamErrors_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommitTime()); + } + for (int i = 0; i < streamErrors_.size(); i++) { + output.writeMessage(2, streamErrors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTime()); + } + for (int i = 0; i < streamErrors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, streamErrors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse other = + (com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) obj; + + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (!getStreamErrorsList().equals(other.getStreamErrorsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (getStreamErrorsCount() > 0) { + hash = (37 * hash) + STREAM_ERRORS_FIELD_NUMBER; + hash = (53 * hash) + getStreamErrorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCommitTimeFieldBuilder(); + getStreamErrorsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + if (streamErrorsBuilder_ == null) { + streamErrors_ = java.util.Collections.emptyList(); + } else { + streamErrors_ = null; + streamErrorsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse build() { + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse result = + new com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse result) { + if 
(streamErrorsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + streamErrors_ = java.util.Collections.unmodifiableList(streamErrors_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.streamErrors_ = streamErrors_; + } else { + result.streamErrors_ = streamErrorsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commitTime_ = commitTimeBuilder_ == null ? commitTime_ : commitTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + .getDefaultInstance()) return this; + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (streamErrorsBuilder_ == null) { + if (!other.streamErrors_.isEmpty()) { + if (streamErrors_.isEmpty()) { + streamErrors_ = other.streamErrors_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStreamErrorsIsMutable(); + streamErrors_.addAll(other.streamErrors_); + } + onChanged(); + } + } else { + if (!other.streamErrors_.isEmpty()) { + if (streamErrorsBuilder_.isEmpty()) { + streamErrorsBuilder_.dispose(); + streamErrorsBuilder_ = null; + streamErrors_ = other.streamErrors_; + bitField0_ = (bitField0_ & ~0x00000002); + streamErrorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getStreamErrorsFieldBuilder() + : null; + } else { + streamErrorsBuilder_.addAllMessages(other.streamErrors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getCommitTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.cloud.bigquery.storage.v1beta2.StorageError m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.StorageError.parser(), + extensionRegistry); + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(m); + } else { + streamErrorsBuilder_.addMessage(m); + } + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + } else { + commitTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commitTime_ != null + && commitTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimeBuilder().mergeFrom(value); + } else { + commitTime_ = value; + } + } else { + commitTimeBuilder_.mergeFrom(value); + } + if (commitTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder clearCommitTime() { + bitField0_ = (bitField0_ & ~0x00000001); + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + + /** + * + * + *
+     * The time at which streams were committed, at microsecond granularity.
+     * This field is only set when there are no stream errors.
+     * **Note:** if this field is not set, the commit was not successful.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private java.util.List streamErrors_ = + java.util.Collections.emptyList(); + + private void ensureStreamErrorsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + streamErrors_ = + new java.util.ArrayList( + streamErrors_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StorageError, + com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder, + com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder> + streamErrorsBuilder_; + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsList() { + if (streamErrorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streamErrors_); + } else { + return streamErrorsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public int getStreamErrorsCount() { + if (streamErrorsBuilder_ == null) { + return streamErrors_.size(); + } else { + return streamErrorsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StorageError getStreamErrors(int index) { + if (streamErrorsBuilder_ == null) { + return streamErrors_.get(index); + } else { + return streamErrorsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder setStreamErrors( + int index, com.google.cloud.bigquery.storage.v1beta2.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.set(index, value); + onChanged(); + } else { + streamErrorsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder setStreamErrors( + int index, com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.set(index, builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder addStreamErrors(com.google.cloud.bigquery.storage.v1beta2.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.add(value); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + int index, com.google.cloud.bigquery.storage.v1beta2.StorageError value) { + if (streamErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamErrorsIsMutable(); + streamErrors_.add(index, value); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder addStreamErrors( + int index, com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder builderForValue) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.add(index, builderForValue.build()); + onChanged(); + } else { + streamErrorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder addAllStreamErrors( + java.lang.Iterable + values) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streamErrors_); + onChanged(); + } else { + streamErrorsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder clearStreamErrors() { + if (streamErrorsBuilder_ == null) { + streamErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + streamErrorsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public Builder removeStreamErrors(int index) { + if (streamErrorsBuilder_ == null) { + ensureStreamErrorsIsMutable(); + streamErrors_.remove(index); + onChanged(); + } else { + streamErrorsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder getStreamErrorsBuilder( + int index) { + return getStreamErrorsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index) { + if (streamErrorsBuilder_ == null) { + return streamErrors_.get(index); + } else { + return streamErrorsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsOrBuilderList() { + if (streamErrorsBuilder_ != null) { + return streamErrorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streamErrors_); + } + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder addStreamErrorsBuilder() { + return getStreamErrorsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1beta2.StorageError.getDefaultInstance()); + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder addStreamErrorsBuilder( + int index) { + return getStreamErrorsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1beta2.StorageError.getDefaultInstance()); + } + + /** + * + * + *
+     * Stream-level errors if the commit failed. Only streams with errors appear
+     * in the list.
+     * If empty, there were no errors and all streams were committed successfully.
+     * If non-empty, some streams had errors and zero streams were committed,
+     * due to the atomicity guarantee.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + public java.util.List + getStreamErrorsBuilderList() { + return getStreamErrorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StorageError, + com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder, + com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder> + getStreamErrorsFieldBuilder() { + if (streamErrorsBuilder_ == null) { + streamErrorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StorageError, + com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder, + com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder>( + streamErrors_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + streamErrors_ = null; + } + return streamErrorsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java new 
file mode 100644 index 000000000000..bccca201ab62 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java @@ -0,0 +1,146 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface BatchCommitWriteStreamsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field is present only when there are no stream errors.
+   * **Note**: if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field is present only when there are no stream errors.
+   * **Note**: if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + + /** + * + * + *
+   * The time at which streams were committed, with microsecond granularity.
+   * This field is present only when there are no stream errors.
+   * **Note**: if this field is not set, the commit was not successful.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors appear
+   * in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, some streams had errors and zero streams were committed,
+   * due to the atomicity guarantee.
+   * 
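+   *
+   * <p>A minimal consumption sketch, illustrative rather than generated; the
+   * {@code logCommitResult} helper is a hypothetical name, and it relies only
+   * on the accessors declared on this interface:
+   *
+   * <pre>{@code
+   * static void logCommitResult(BatchCommitWriteStreamsResponse response) {
+   *   if (response.hasCommitTime()) {
+   *     // All streams were committed atomically at this timestamp.
+   *     System.out.println("Committed at " + response.getCommitTime());
+   *   } else {
+   *     // Zero streams were committed; report each per-stream error.
+   *     for (StorageError error : response.getStreamErrorsList()) {
+   *       System.err.println(error.getCode() + ": " + error.getErrorMessage());
+   *     }
+   *   }
+   * }
+   * }</pre>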
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + java.util.List getStreamErrorsList(); + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors appear
+   * in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, some streams had errors and zero streams were committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.StorageError getStreamErrors(int index); + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors appear
+   * in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, some streams had errors and zero streams were committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + int getStreamErrorsCount(); + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors appear
+   * in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, some streams had errors and zero streams were committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + java.util.List + getStreamErrorsOrBuilderList(); + + /** + * + * + *
+   * Stream-level errors if the commit failed. Only streams with errors appear
+   * in the list.
+   * If empty, there were no errors and all streams were committed successfully.
+   * If non-empty, some streams had errors and zero streams were committed,
+   * due to the atomicity guarantee.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder getStreamErrorsOrBuilder( + int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java new file mode 100644 index 000000000000..0c732d98ea92 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java @@ -0,0 +1,1111 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `CreateReadSession`.
+ * 
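+ *
+ * <p>Like other generated messages, instances survive a byte-level round trip; a
+ * short sketch (the {@code roundTrip} helper is a hypothetical name):
+ *
+ * <pre>{@code
+ * static CreateReadSessionRequest roundTrip(CreateReadSessionRequest original)
+ *     throws com.google.protobuf.InvalidProtocolBufferException {
+ *   // Serialize to bytes and parse back; equals() compares field values.
+ *   CreateReadSessionRequest reparsed =
+ *       CreateReadSessionRequest.parseFrom(original.toByteString());
+ *   assert original.equals(reparsed) && original.hashCode() == reparsed.hashCode();
+ *   return reparsed;
+ * }
+ * }</pre>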
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest} + */ +public final class CreateReadSessionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + CreateReadSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateReadSessionRequest.newBuilder() to construct. + private CreateReadSessionRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateReadSessionRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateReadSessionRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int READ_SESSION_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.ReadSession readSession_; + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + @java.lang.Override + public boolean hasReadSession() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getReadSession() { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder getReadSessionOrBuilder() { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } + + public static final int MAX_STREAM_COUNT_FIELD_NUMBER = 3; + private int maxStreamCount_ = 0; + + /** + * + * + *
+   * Max initial number of streams. If unset or zero, the server will
+   * choose the number of streams so as to produce reasonable throughput. Must
+   * be non-negative. The number of streams may be lower than the requested
+   * number, depending on the amount of parallelism that is reasonable for the
+   * table. An error is returned if the max count is greater than the current
+   * system max limit of 1,000.
+   *
+   * Streams must be read starting from offset 0.
+   * 
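+   *
+   * <p>A short, illustrative sketch of assembling this request; the project and
+   * table paths are placeholders, and {@code newSessionRequest} is a
+   * hypothetical helper name:
+   *
+   * <pre>{@code
+   * static CreateReadSessionRequest newSessionRequest() {
+   *   ReadSession session =
+   *       ReadSession.newBuilder()
+   *           .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+   *           .setDataFormat(DataFormat.AVRO)
+   *           .build();
+   *   return CreateReadSessionRequest.newBuilder()
+   *       .setParent("projects/my-project")
+   *       .setReadSession(session)
+   *       // Zero lets the server choose a stream count for reasonable throughput.
+   *       .setMaxStreamCount(0)
+   *       .build();
+   * }
+   * }</pre>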
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + @java.lang.Override + public int getMaxStreamCount() { + return maxStreamCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getReadSession()); + } + if (maxStreamCount_ != 0) { + output.writeInt32(3, maxStreamCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getReadSession()); + } + if (maxStreamCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, maxStreamCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest other = + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasReadSession() != other.hasReadSession()) return false; + if (hasReadSession()) { + if (!getReadSession().equals(other.getReadSession())) return false; + } + if (getMaxStreamCount() != other.getMaxStreamCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasReadSession()) { + hash = (37 * hash) + READ_SESSION_FIELD_NUMBER; + hash = (53 * hash) + getReadSession().hashCode(); + } + hash = (37 * hash) + MAX_STREAM_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getMaxStreamCount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `CreateReadSession`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getReadSessionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + readSession_ = null; + if (readSessionBuilder_ != null) { + readSessionBuilder_.dispose(); + readSessionBuilder_ = null; + } + maxStreamCount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest build() { + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest result = + new com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.readSession_ = + readSessionBuilder_ == null ? 
readSession_ : readSessionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.maxStreamCount_ = maxStreamCount_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasReadSession()) { + mergeReadSession(other.getReadSession()); + } + if (other.getMaxStreamCount() != 0) { + setMaxStreamCount(other.getMaxStreamCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getReadSessionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + maxStreamCount_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadSession readSession_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder> + readSessionBuilder_; + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + public boolean hasReadSession() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getReadSession() { + if (readSessionBuilder_ == null) { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } else { + return readSessionBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadSession(com.google.cloud.bigquery.storage.v1beta2.ReadSession value) { + if (readSessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readSession_ = value; + } else { + readSessionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadSession( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder builderForValue) { + if (readSessionBuilder_ == null) { + readSession_ = builderForValue.build(); + } else { + readSessionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReadSession(com.google.cloud.bigquery.storage.v1beta2.ReadSession value) { + if (readSessionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && readSession_ != null + && readSession_ + != com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance()) { + getReadSessionBuilder().mergeFrom(value); + } else { + readSession_ = value; + } + } else { + readSessionBuilder_.mergeFrom(value); + } + if (readSession_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReadSession() { + bitField0_ = (bitField0_ & ~0x00000002); + readSession_ = null; + if (readSessionBuilder_ != null) { + readSessionBuilder_.dispose(); + readSessionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder getReadSessionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getReadSessionFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder + getReadSessionOrBuilder() { + if (readSessionBuilder_ != null) { + return readSessionBuilder_.getMessageOrBuilder(); + } else { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } + } + + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder> + getReadSessionFieldBuilder() { + if (readSessionBuilder_ == null) { + readSessionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder>( + getReadSession(), getParentForChildren(), isClean()); + readSession_ = null; + } + return readSessionBuilder_; + } + + private int maxStreamCount_; + + /** + * + * + *
+     * Max initial number of streams. If unset or zero, the server will
+     * choose the number of streams so as to produce reasonable throughput. Must
+     * be non-negative. The number of streams may be lower than the requested
+     * number, depending on the amount of parallelism that is reasonable for the
+     * table. An error is returned if the max count is greater than the current
+     * system max limit of 1,000.
+     *
+     * Streams must be read starting from offset 0.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + @java.lang.Override + public int getMaxStreamCount() { + return maxStreamCount_; + } + + /** + * + * + *
+     * Max initial number of streams. If unset or zero, the server will
+     * choose the number of streams so as to produce reasonable throughput. Must
+     * be non-negative. The number of streams may be lower than the requested
+     * number, depending on the amount of parallelism that is reasonable for the
+     * table. An error is returned if the max count is greater than the current
+     * system max limit of 1,000.
+     *
+     * Streams must be read starting from offset 0.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @param value The maxStreamCount to set. + * @return This builder for chaining. + */ + public Builder setMaxStreamCount(int value) { + + maxStreamCount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Max initial number of streams. If unset or zero, the server will
+     * choose the number of streams so as to produce reasonable throughput. Must
+     * be non-negative. The number of streams may be lower than the requested
+     * number, depending on the amount of parallelism that is reasonable for the
+     * table. An error is returned if the max count is greater than the current
+     * system max limit of 1,000.
+     *
+     * Streams must be read starting from offset 0.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @return This builder for chaining. + */ + public Builder clearMaxStreamCount() { + bitField0_ = (bitField0_ & ~0x00000004); + maxStreamCount_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateReadSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java new file mode 100644 index 000000000000..12dc423dadb4 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java @@ -0,0 +1,121 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface CreateReadSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + boolean hasReadSession(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession getReadSession(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder getReadSessionOrBuilder(); + + /** + * + * + *
+   * Max initial number of streams. If unset or zero, the server will
+   * choose the number of streams so as to produce reasonable throughput. Must
+   * be non-negative. The number of streams may be lower than the requested
+   * number, depending on the amount of parallelism that is reasonable for the
+   * table. An error is returned if the max count is greater than the current
+   * system max limit of 1,000.
+   *
+   * Streams must be read starting from offset 0.
+   * 
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + int getMaxStreamCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java new file mode 100644 index 000000000000..eeb313d5df48 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java @@ -0,0 +1,986 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `CreateWriteStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest} + */ +public final class CreateWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) + CreateWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use CreateWriteStreamRequest.newBuilder() to construct. + private CreateWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateWriteStreamRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateWriteStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.WriteStream writeStream_; + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + @java.lang.Override + public boolean hasWriteStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream getWriteStream() { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance() + : writeStream_; + } + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder getWriteStreamOrBuilder() { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance() + : writeStream_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getWriteStream()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getWriteStream()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasWriteStream() != other.hasWriteStream()) return false; + if (hasWriteStream()) { + if (!getWriteStream().equals(other.getWriteStream())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasWriteStream()) { + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `CreateWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getWriteStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + writeStream_ = null; + if (writeStreamBuilder_ != null) { + writeStreamBuilder_.dispose(); + writeStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.writeStream_ = + writeStreamBuilder_ == null ? 
writeStream_ : writeStreamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasWriteStream()) { + mergeWriteStream(other.getWriteStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getWriteStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.WriteStream writeStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.WriteStream, + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder> + writeStreamBuilder_; + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + public boolean hasWriteStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.WriteStream getWriteStream() { + if (writeStreamBuilder_ == null) { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance() + : writeStream_; + } else { + return writeStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream(com.google.cloud.bigquery.storage.v1beta2.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + } else { + writeStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream( + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder builderForValue) { + if (writeStreamBuilder_ == null) { + writeStream_ = builderForValue.build(); + } else { + writeStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeWriteStream(com.google.cloud.bigquery.storage.v1beta2.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && writeStream_ != null + && writeStream_ + != com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance()) { + getWriteStreamBuilder().mergeFrom(value); + } else { + writeStream_ = value; + } + } else { + writeStreamBuilder_.mergeFrom(value); + } + if (writeStream_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearWriteStream() { + bitField0_ = (bitField0_ & ~0x00000002); + writeStream_ = null; + if (writeStreamBuilder_ != null) { + writeStreamBuilder_.dispose(); + writeStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder getWriteStreamBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getWriteStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder + getWriteStreamOrBuilder() { + if (writeStreamBuilder_ != null) { + return writeStreamBuilder_.getMessageOrBuilder(); + } else { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance() + : writeStream_; + } + } + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.WriteStream, + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder> + getWriteStreamFieldBuilder() { + if (writeStreamBuilder_ == null) { + writeStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.WriteStream, + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder>( + getWriteStream(), getParentForChildren(), isClean()); + writeStream_ = null; + } + return writeStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java new file mode 100644 index 000000000000..ab614acd01fc --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface CreateWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.CreateWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
+   * Required. Reference to the table to which the stream belongs, in the format
+   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + boolean hasWriteStream(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + com.google.cloud.bigquery.storage.v1beta2.WriteStream getWriteStream(); + + /** + * + * + *
+   * Required. Stream to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder getWriteStreamOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java new file mode 100644 index 000000000000..b48dcf7dc2bf --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java @@ -0,0 +1,169 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Data format for input or output data.
+ * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.DataFormat} + */ +public enum DataFormat implements com.google.protobuf.ProtocolMessageEnum { + /** DATA_FORMAT_UNSPECIFIED = 0; */ + DATA_FORMAT_UNSPECIFIED(0), + /** + * + * + *
+   * Avro is a standard open source row-based file format.
+   * See https://avro.apache.org/ for more details.
+   * 
+ * + * AVRO = 1; + */ + AVRO(1), + /** + * + * + *
+   * Arrow is a standard open source column-based message format.
+   * See https://arrow.apache.org/ for more details.
+   * 
+ * + * ARROW = 2; + */ + ARROW(2), + UNRECOGNIZED(-1), + ; + + /** DATA_FORMAT_UNSPECIFIED = 0; */ + public static final int DATA_FORMAT_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+   * Avro is a standard open source row-based file format.
+   * See https://avro.apache.org/ for more details.
+   * 
+ * + * AVRO = 1; + */ + public static final int AVRO_VALUE = 1; + + /** + * + * + *
+   * Arrow is a standard open source column-based message format.
+   * See https://arrow.apache.org/ for more details.
+   * 
+ * + * ARROW = 2; + */ + public static final int ARROW_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DataFormat valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static DataFormat forNumber(int value) { + switch (value) { + case 0: + return DATA_FORMAT_UNSPECIFIED; + case 1: + return AVRO; + case 2: + return ARROW; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DataFormat findValueByNumber(int number) { + return DataFormat.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final DataFormat[] VALUES = values(); + + public static DataFormat valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DataFormat(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.DataFormat) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java new file mode 100644 index 000000000000..8b7ba725716c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java @@ -0,0 +1,667 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for invoking `FinalizeWriteStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest} + */ +public final class FinalizeWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) + FinalizeWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FinalizeWriteStreamRequest.newBuilder() to construct. + private FinalizeWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for invoking `FinalizeWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java new file mode 100644 index 000000000000..af8da877cdc2 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface FinalizeWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   *
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   *
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java new file mode 100644 index 000000000000..8f93ccbb538a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java @@ -0,0 +1,553 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Response message for `FinalizeWriteStream`.
+ *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse} + */ +public final class FinalizeWriteStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) + FinalizeWriteStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FinalizeWriteStreamResponse.newBuilder() to construct. + private FinalizeWriteStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse.Builder.class); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 1; + private long rowCount_ = 0L; + + /** + * + * + *
+   * Number of rows in the finalized stream.
+   *
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (rowCount_ != 0L) { + output.writeInt64(1, rowCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, rowCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse other = + (com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `FinalizeWriteStream`.
+   *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + rowCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse build() { + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse result = + new com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.rowCount_ = rowCount_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder 
setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + .getDefaultInstance()) return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long rowCount_; + + /** + * + * + *
+     * Number of rows in the finalized stream.
+     *
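Since the generated builders are public, response messages can also be constructed directly, which is convenient for stubbing in tests; a small sketch with an arbitrary row count (the helper name is hypothetical):

```java
import com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse;

final class FakeFinalizeResponse {
  // Hypothetical test helper; the row count is arbitrary.
  static FinalizeWriteStreamResponse withRows(long rows) {
    return FinalizeWriteStreamResponse.newBuilder().setRowCount(rows).build();
  }
}
```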
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+     * Number of rows in the finalized stream.
+     *
+ * + * int64 row_count = 1; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Number of rows in the finalized stream.
+     *
+ * + * int64 row_count = 1; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000001); + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java new file mode 100644 index 000000000000..1759cde84b50 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface FinalizeWriteStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.FinalizeWriteStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Number of rows in the finalized stream.
+   *
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + long getRowCount(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java new file mode 100644 index 000000000000..df6659b47c53 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java @@ -0,0 +1,944 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `FlushRows`.
+ *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FlushRowsRequest} + */ +public final class FlushRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) + FlushRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FlushRowsRequest.newBuilder() to construct. + private FlushRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FlushRowsRequest() { + writeStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FlushRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.Builder.class); + } + + private int bitField0_; + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object writeStream_ = ""; + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + @java.lang.Override + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   *
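Because `offset` uses the `google.protobuf.Int64Value` wrapper rather than a bare `int64`, an unset offset is distinguishable from an explicit 0. A minimal sketch of issuing a flush, again assuming the `BigQueryWriteClient` wrapper from this PR; the stream name and offset value are placeholders.

```java
import com.google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest;
import com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse;
import com.google.protobuf.Int64Value;

public class FlushRowsExample {
  public static void main(String[] args) throws Exception {
    FlushRowsRequest request =
        FlushRowsRequest.newBuilder()
            // Placeholder stream path; FlushRows applies to buffered-type streams.
            .setWriteStream("projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream")
            // Rows up to and including this offset become visible.
            .setOffset(Int64Value.of(1000L))
            .build();
    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
      FlushRowsResponse response = client.flushRows(request);
      System.out.println("Flushed through offset: " + response.getOffset());
    }
  }
}
```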
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + @java.lang.Override + public boolean hasOffset() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   *
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + @java.lang.Override + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getOffset()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest other = + (com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if (!getOffset().equals(other.getOffset())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `FlushRows`.
+   *
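Generated messages are immutable, so `toBuilder()` is the idiomatic way to derive a modified copy; a short sketch (the stream path and offset are placeholders):

```java
import com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest;
import com.google.protobuf.Int64Value;

final class DeriveRequest {
  public static void main(String[] args) {
    FlushRowsRequest base =
        FlushRowsRequest.newBuilder()
            .setWriteStream("projects/p/datasets/d/tables/t/streams/s")
            .build();
    // toBuilder() copies `base`, so the original message stays unchanged.
    FlushRowsRequest withOffset =
        base.toBuilder().setOffset(Int64Value.of(500L)).build();
    System.out.println(withOffset.getOffset().getValue()); // 500
  }
}
```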
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FlushRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getOffsetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + writeStream_ = ""; + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest build() { + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest result = + new com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.writeStream_ = writeStream_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offsetBuilder_ == null ? 
offset_ : offsetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest.getDefaultInstance()) + return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + writeStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getOffsetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object writeStream_ = ""; + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + writeStream_ = getDefaultInstance().getWriteStream(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The stream that is the target of the flush operation.
+     *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + writeStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + } else { + offsetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && offset_ != null + && offset_ != com.google.protobuf.Int64Value.getDefaultInstance()) { + getOffsetBuilder().mergeFrom(value); + } else { + offset_ = value; + } + } else { + offsetBuilder_.mergeFrom(value); + } + if (offset_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = null; + if (offsetBuilder_ != null) { + offsetBuilder_.dispose(); + offsetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + + /** + * + * + *
+     * Ending offset of the flush operation. Rows before this offset (including
+     * this offset) will be flushed.
+     *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FlushRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java new file mode 100644 index 000000000000..b958b669a5e0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface FlushRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.FlushRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + + /** + * + * + *
+   * Required. The stream that is the target of the flush operation.
+   *
+ * + * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   *
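A common pattern with wrapper-typed fields is to gate on `hasOffset()` before reading, since `getOffset()` returns the default instance when the field is unset; a small helper sketch (the helper name and fallback are illustrative):

```java
import com.google.cloud.bigquery.storage.v1beta2.FlushRowsRequest;

final class OffsetPresence {
  // Hypothetical helper: returns the explicitly set offset, or `fallback`
  // when the Int64Value wrapper was never set on the request.
  static long offsetOrDefault(FlushRowsRequest request, long fallback) {
    return request.hasOffset() ? request.getOffset().getValue() : fallback;
  }
}
```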
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   *
+ * + * .google.protobuf.Int64Value offset = 2; + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + + /** + * + * + *
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
+   *
+ * + * .google.protobuf.Int64Value offset = 2; + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java new file mode 100644 index 000000000000..e3d83a223ae8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java @@ -0,0 +1,542 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Response message for `FlushRows`.
+ *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FlushRowsResponse} + */ +public final class FlushRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) + FlushRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use FlushRowsResponse.newBuilder() to construct. + private FlushRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FlushRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FlushRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.Builder.class); + } + + public static final int OFFSET_FIELD_NUMBER = 1; + private long offset_ = 0L; + + /** + * + * + *
+   * The rows before this offset (including this offset) are flushed.
+   *
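The surrounding `parseFrom` overloads pair with `toByteArray()` for a simple wire round-trip; a sketch with an arbitrary offset value:

```java
import com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse;

final class RoundTrip {
  public static void main(String[] args) throws Exception {
    FlushRowsResponse original = FlushRowsResponse.newBuilder().setOffset(42L).build();
    byte[] wire = original.toByteArray();                         // serialize
    FlushRowsResponse parsed = FlushRowsResponse.parseFrom(wire); // parse back
    System.out.println(original.equals(parsed)); // true: generated field-by-field equality
  }
}
```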
+ * + * int64 offset = 1; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (offset_ != 0L) { + output.writeInt64(1, offset_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, offset_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse other = + (com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) obj; + + if (getOffset() != other.getOffset()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Response message for `FlushRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.FlushRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + offset_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse build() { + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse result = + new com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.offset_ = offset_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + 
public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse.getDefaultInstance()) + return this; + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + offset_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long offset_; + + /** + * + * + *
+     * The rows before this offset (including this offset) are flushed.
+     * 
+     *
+     * <code>int64 offset = 1;</code>
+     *
+     * @return The offset.
+     */
+    @java.lang.Override
+    public long getOffset() {
+      return offset_;
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * The rows before this offset (including this offset) are flushed.
+     * </pre>
+     *
+     * <code>int64 offset = 1;</code>
+     *
+     * @param value The offset to set.
+     * @return This builder for chaining.
+     */
+    public Builder setOffset(long value) {
+
+      offset_ = value;
+      bitField0_ |= 0x00000001;
+      onChanged();
+      return this;
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * The rows before this offset (including this offset) are flushed.
+     * </pre>
+ * + * int64 offset = 1; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000001); + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FlushRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.FlushRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java new file mode 100644 index 000000000000..78c8f92fc38e --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface FlushRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.FlushRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The rows before this offset (including this offset) are flushed.
+   * 
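Since both FlushRowsResponse and FlushRowsResponse.Builder implement this interface, offset-reading code can be written once against it; a small sketch using only the generated types:

    static long flushedOffset(FlushRowsResponseOrBuilder m) {
      return m.getOffset(); // accepts the immutable message or an in-progress builder
    }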
+ * + * int64 offset = 1; + * + * @return The offset. + */ + long getOffset(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java new file mode 100644 index 000000000000..8524cdacb5ac --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java @@ -0,0 +1,663 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `GetWriteStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest} + */ +public final class GetWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) + GetWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use GetWriteStreamRequest.newBuilder() to construct. + private GetWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetWriteStreamRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
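A short construction sketch (editorial; the project, dataset, table, and stream IDs are placeholders):

    GetWriteStreamRequest request =
        GetWriteStreamRequest.newBuilder()
            .setName("projects/my-project/datasets/my_dataset/tables/my_table/streams/my_stream")
            .build();
    // REQUIRED is enforced by the service; the builder itself only rejects null.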
+   *
+   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+   * </code>
+   *
+   * @return The name.
+   */
+  @java.lang.Override
+  public java.lang.String getName() {
+    java.lang.Object ref = name_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      name_ = s;
+      return s;
+    }
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + byte[] data) 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `GetWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+     *
+     * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     * </code>
+     *
+     * @return The name.
+     */
+    public java.lang.String getName() {
+      java.lang.Object ref = name_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        name_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * </pre>
+     *
+     * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     * </code>
+     *
+     * @return The bytes for name.
+     */
+    public com.google.protobuf.ByteString getNameBytes() {
+      java.lang.Object ref = name_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+        name_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * </pre>
+     *
+     * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     * </code>
+     *
+     * @param value The name to set.
+     * @return This builder for chaining.
+     */
+    public Builder setName(java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      name_ = value;
+      bitField0_ |= 0x00000001;
+      onChanged();
+      return this;
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * </pre>
+     *
+     * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     * </code>
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearName() {
+      name_ = getDefaultInstance().getName();
+      bitField0_ = (bitField0_ & ~0x00000001);
+      onChanged();
+      return this;
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java new file mode 100644 index 000000000000..7147dedbc04b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface GetWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.GetWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+   *
+   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+   * </code>
+   *
+   * @return The name.
+   */
+  java.lang.String getName();
+
+  /**
+   *
+   *
+   * <pre>
+   * Required. Name of the stream to get, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java new file mode 100644 index 000000000000..a3e66be4b27d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class ProjectName implements ResourceName { + private static final PathTemplate PROJECT = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private volatile Map fieldValuesMap; + private final String project; + + @Deprecated + protected ProjectName() { + project = null; + } + + private ProjectName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ProjectName of(String project) { + return newBuilder().setProject(project).build(); + } + + public static String format(String project) { + return newBuilder().setProject(project).build().toString(); + } + + public static ProjectName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT.validatedMatch( + formattedString, "ProjectName.parse: formattedString not in valid format"); + return of(matchMap.get("project")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ProjectName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if 
(fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT.instantiate("project", project); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ProjectName that = ((ProjectName) o); + return Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for projects/{project}. */ + public static class Builder { + private String project; + + protected Builder() {} + + public String getProject() { + return project; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + private Builder(ProjectName projectName) { + this.project = projectName.project; + } + + public ProjectName build() { + return new ProjectName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java new file mode 100644 index 000000000000..e0dd0c0ce4a0 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java @@ -0,0 +1,85 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
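A round-trip sketch for the ProjectName helper above ("my-project" is a placeholder):

    ProjectName name = ProjectName.of("my-project");
    String formatted = name.toString();                 // "projects/my-project"
    ProjectName parsed = ProjectName.parse(formatted);  // equals(name) is true
    boolean ok = ProjectName.isParsableFrom(formatted); // true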
+// source: google/cloud/bigquery/storage/v1beta2/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public final class ProtoBufProto { + private ProtoBufProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n4google/cloud/bigquery/storage/v1beta2/" + + "protobuf.proto\022%google.cloud.bigquery.st" + + "orage.v1beta2\032 google/protobuf/descripto" + + "r.proto\"I\n\013ProtoSchema\022:\n\020proto_descript" + + "or\030\001 \001(\0132 .google.protobuf.DescriptorPro" + + "to\"$\n\tProtoRows\022\027\n\017serialized_rows\030\001 \003(\014" + + "B\201\001\n)com.google.cloud.bigquery.storage.v" + + "1beta2B\rProtoBufProtoP\001ZCcloud.google.co" + + "m/go/bigquery/storage/apiv1beta2/storage" + + "pb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.DescriptorProtos.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_descriptor, + new java.lang.String[] { + "ProtoDescriptor", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_descriptor, + new java.lang.String[] { + "SerializedRows", + }); + com.google.protobuf.DescriptorProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java new file mode 100644 index 000000000000..4e3e6e7aa52a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java @@ -0,0 +1,696 @@ +/* + * Copyright 
2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoRows} */ +public final class ProtoRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ProtoRows) + ProtoRowsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ProtoRows.newBuilder() to construct. + private ProtoRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoRows() { + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoRows(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.class, + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder.class); + } + + public static final int SERIALIZED_ROWS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.ProtobufList + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
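A population sketch, assuming rowMessage is an instance of a caller-defined protobuf message that matches the ProtoSchema previously sent on the stream:

    static ProtoRows toProtoRows(com.google.protobuf.Message rowMessage) {
      // One list element per serialized row message.
      return ProtoRows.newBuilder().addSerializedRows(rowMessage.toByteString()).build();
    }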
+   *
+   * <code>repeated bytes serialized_rows = 1;</code>
+   *
+   * @return A list containing the serializedRows.
+   */
+  @java.lang.Override
+  public java.util.List<com.google.protobuf.ByteString> getSerializedRowsList() {
+    return serializedRows_;
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * </pre>
+   *
+   * <code>repeated bytes serialized_rows = 1;</code>
+   *
+   * @return The count of serializedRows.
+   */
+  public int getSerializedRowsCount() {
+    return serializedRows_.size();
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * </pre>
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < serializedRows_.size(); i++) { + output.writeBytes(1, serializedRows_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < serializedRows_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(serializedRows_.get(i)); + } + size += dataSize; + size += 1 * getSerializedRowsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ProtoRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ProtoRows other = + (com.google.cloud.bigquery.storage.v1beta2.ProtoRows) obj; + + if (!getSerializedRowsList().equals(other.getSerializedRowsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSerializedRowsCount() > 0) { + hash = (37 * hash) + SERIALIZED_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRowsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + 
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.ProtoRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoRows} */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ProtoRows) + com.google.cloud.bigquery.storage.v1beta2.ProtoRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.class, + com.google.cloud.bigquery.storage.v1beta2.ProtoRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ProtoRows.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows build() { + com.google.cloud.bigquery.storage.v1beta2.ProtoRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ProtoRows result = + new com.google.cloud.bigquery.storage.v1beta2.ProtoRows(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ProtoRows result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + serializedRows_.makeImmutable(); + result.serializedRows_ = serializedRows_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + 
public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ProtoRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ProtoRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ProtoRows other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ProtoRows.getDefaultInstance()) + return this; + if (!other.serializedRows_.isEmpty()) { + if (serializedRows_.isEmpty()) { + serializedRows_ = other.serializedRows_; + serializedRows_.makeImmutable(); + bitField0_ |= 0x00000001; + } else { + ensureSerializedRowsIsMutable(); + serializedRows_.addAll(other.serializedRows_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.ByteString v = input.readBytes(); + ensureSerializedRowsIsMutable(); + serializedRows_.add(v); + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Internal.ProtobufList + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + + private void ensureSerializedRowsIsMutable() { + if (!serializedRows_.isModifiable()) { + serializedRows_ = makeMutableCopy(serializedRows_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
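 // A minimal usage sketch (not part of this diff): populating serialized_rows. // "record" is assumed to be an instance of a caller-defined generated message. ProtoRows rows = ProtoRows.newBuilder() .addSerializedRows(record.toByteString()) // one serialized row per entry .build(); 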
 + * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + public java.util.List getSerializedRowsList() { + serializedRows_.makeImmutable(); + return serializedRows_; + } + + /** + * + * +
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index to set the value at. + * @param value The serializedRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedRows(int index, com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param value The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addSerializedRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
 + * + * repeated bytes serialized_rows = 1; + * + * @param values The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addAllSerializedRows( + java.lang.Iterable values) { + ensureSerializedRowsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, serializedRows_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * +
+     * A sequence of rows serialized as a Protocol Buffer.
+     *
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRows() { + serializedRows_ = emptyList(com.google.protobuf.ByteString.class); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ProtoRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ProtoRows) + private static final com.google.cloud.bigquery.storage.v1beta2.ProtoRows DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ProtoRows(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoRows getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoRows getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java new file mode 100644 index 000000000000..93f5abf16948 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java @@ -0,0 +1,75 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ProtoRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ProtoRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
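 // A consumption sketch under the same assumption of a caller-defined generated // message class "MyRecord"; parseFrom may throw InvalidProtocolBufferException. for (com.google.protobuf.ByteString bytes : rows.getSerializedRowsList()) { MyRecord record = MyRecord.parseFrom(bytes); // deserialize one row } 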
 + * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + java.util.List getSerializedRowsList(); + + /** + * + * +
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + int getSerializedRowsCount(); + + /** + * + * + *
+   * A sequence of rows serialized as a Protocol Buffer.
+   *
+   * See https://developers.google.com/protocol-buffers/docs/overview for more
+   * information on deserializing this field.
+   * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + com.google.protobuf.ByteString getSerializedRows(int index); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java new file mode 100644 index 000000000000..f819699424c8 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java @@ -0,0 +1,778 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * ProtoSchema describes the schema of the serialized protocol buffer data rows.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoSchema} + */ +public final class ProtoSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ProtoSchema) + ProtoSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ProtoSchema.newBuilder() to construct. + private ProtoSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoSchema() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder.class); + } + + private int bitField0_; + public static final int PROTO_DESCRIPTOR_FIELD_NUMBER = 1; + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + + /** + * + * + *
 +   * Descriptor for input message. The descriptor has to be self-contained, +   * including all the nested types, except for the protocol buffer well-known types 
+   * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+   * 
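 // A sketch of building a ProtoSchema, assuming "MyRecord" is a top-level // generated message with no nested or imported (non-well-known) types, // so its DescriptorProto is already self-contained. ProtoSchema schema = ProtoSchema.newBuilder() .setProtoDescriptor(MyRecord.getDescriptor().toProto()) .build(); 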
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptor() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
 +   * Descriptor for input message. The descriptor has to be self-contained, +   * including all the nested types, except for the protocol buffer well-known types 
+   * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + @java.lang.Override + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + + /** + * + * + *
 +   * Descriptor for input message. The descriptor has to be self-contained, +   * including all the nested types, except for the protocol buffer well-known types 
+   * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + @java.lang.Override + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getProtoDescriptor()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getProtoDescriptor()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ProtoSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema other = + (com.google.cloud.bigquery.storage.v1beta2.ProtoSchema) obj; + + if (hasProtoDescriptor() != other.hasProtoDescriptor()) return false; + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().equals(other.getProtoDescriptor())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProtoDescriptor()) { + hash = (37 * hash) + PROTO_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptor().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * ProtoSchema describes the schema of the serialized protocol buffer data rows.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ProtoSchema) + com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getProtoDescriptorFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + protoDescriptor_ = null; + if (protoDescriptorBuilder_ != null) { + protoDescriptorBuilder_.dispose(); + protoDescriptorBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ProtoSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema build() { + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ProtoSchema result = + new com.google.cloud.bigquery.storage.v1beta2.ProtoSchema(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ProtoSchema result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.protoDescriptor_ = + protoDescriptorBuilder_ == null ? 
protoDescriptor_ : protoDescriptorBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ProtoSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ProtoSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ProtoSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ProtoSchema.getDefaultInstance()) + return this; + if (other.hasProtoDescriptor()) { + mergeProtoDescriptor(other.getProtoDescriptor()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getProtoDescriptorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + protoDescriptorBuilder_; + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + public boolean hasProtoDescriptor() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + if (protoDescriptorBuilder_ == null) { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } else { + return protoDescriptorBuilder_.getMessage(); + } + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor(com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptor_ = value; + } else { + protoDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder builderForValue) { + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = builderForValue.build(); + } else { + protoDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder mergeProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && protoDescriptor_ != null + && protoDescriptor_ + != com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance()) { + getProtoDescriptorBuilder().mergeFrom(value); + } else { + protoDescriptor_ = value; + } + } else { + protoDescriptorBuilder_.mergeFrom(value); + } + if (protoDescriptor_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder clearProtoDescriptor() { + bitField0_ = (bitField0_ & ~0x00000001); + protoDescriptor_ = null; + if (protoDescriptorBuilder_ != null) { + protoDescriptorBuilder_.dispose(); + protoDescriptorBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto.Builder + getProtoDescriptorBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getProtoDescriptorFieldBuilder().getBuilder(); + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + if (protoDescriptorBuilder_ != null) { + return protoDescriptorBuilder_.getMessageOrBuilder(); + } else { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + } + + /** + * + * + *
 +     * Descriptor for input message. The descriptor has to be self-contained, +     * including all the nested types, except for the protocol buffer well-known types 
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + getProtoDescriptorFieldBuilder() { + if (protoDescriptorBuilder_ == null) { + protoDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder>( + getProtoDescriptor(), getParentForChildren(), isClean()); + protoDescriptor_ = null; + } + return protoDescriptorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ProtoSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ProtoSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.ProtoSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ProtoSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ProtoSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ProtoSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java new file mode 100644 index 000000000000..082c055bbd13 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java @@ -0,0 +1,69 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/protobuf.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ProtoSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ProtoSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
 +   * Descriptor for input message. The descriptor has to be self-contained, +   * including all the nested types, except for the protocol buffer well-known types 
+   * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + boolean hasProtoDescriptor(); + + /** + * + * + *
 +   * Descriptor for input message. The descriptor has to be self-contained, +   * including all the nested types, except for the protocol buffer well-known types 
+   * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor(); + + /** + * + * + *
 +   * Descriptor for input message. The descriptor has to be self-contained, +   * including all the nested types, except for the protocol buffer well-known types 
+   * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+   * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder getProtoDescriptorOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java new file mode 100644 index 000000000000..2c3651f75637 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java @@ -0,0 +1,754 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `ReadRows`.
+ * 
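 // A request-building sketch; the stream name below is illustrative only. ReadRowsRequest request = ReadRowsRequest.newBuilder() .setReadStream( "projects/my-project/locations/us/sessions/my-session/streams/my-stream") .setOffset(0L) // start from the beginning of the stream .build(); 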
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsRequest} + */ +public final class ReadRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + ReadRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRowsRequest.newBuilder() to construct. + private ReadRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsRequest() { + readStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.Builder.class); + } + + public static final int READ_STREAM_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object readStream_ = ""; + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The readStream. + */ + @java.lang.Override + public java.lang.String getReadStream() { + java.lang.Object ref = readStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + readStream_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for readStream. + */ + @java.lang.Override + public com.google.protobuf.ByteString getReadStreamBytes() { + java.lang.Object ref = readStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + readStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private long offset_ = 0L; + + /** + * + * + *
 +   * The offset requested must be less than that of the last row read from Read. 
+   * Requesting a larger offset is undefined. If not specified, start reading
+   * from offset zero.
+   * 
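 // A resume sketch: after an interrupted read, re-issue the request at the // next unread row. "rowsReceivedSoFar" is a hypothetical caller-side counter. ReadRowsRequest resumed = request.toBuilder().setOffset(rowsReceivedSoFar).build(); 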
+ * + * int64 offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(readStream_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, readStream_); + } + if (offset_ != 0L) { + output.writeInt64(2, offset_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(readStream_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, readStream_); + } + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, offset_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest other = + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) obj; + + if (!getReadStream().equals(other.getReadStream())) return false; + if (getOffset() != other.getOffset()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + READ_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getReadStream().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `ReadRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readStream_ = ""; + offset_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest build() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest result = + new com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readStream_ = readStream_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.offset_ = offset_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.getDefaultInstance()) + return this; + if (!other.getReadStream().isEmpty()) { + readStream_ = other.readStream_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + readStream_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + offset_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object readStream_ = ""; + + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The readStream. + */ + public java.lang.String getReadStream() { + java.lang.Object ref = readStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + readStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * <pre>
+     * Required. Stream to read rows from.
+     * </pre>
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for readStream. + */ + public com.google.protobuf.ByteString getReadStreamBytes() { + java.lang.Object ref = readStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + readStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * <pre>
+     * Required. Stream to read rows from.
+     * </pre>
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The readStream to set. + * @return This builder for chaining. + */ + public Builder setReadStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + readStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Required. Stream to read rows from.
+     * </pre>
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearReadStream() { + readStream_ = getDefaultInstance().getReadStream(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Required. Stream to read rows from.
+     * </pre>
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for readStream to set. + * @return This builder for chaining. + */ + public Builder setReadStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + readStream_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long offset_; + + /** + * + * + *
+     * <pre>
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * </pre>
+ * + * int64 offset = 2; + * + * @return The offset. + */ + @java.lang.Override + public long getOffset() { + return offset_; + } + + /** + * + * + *
+     * <pre>
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * </pre>
+ * + * int64 offset = 2; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + + offset_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * </pre>
+ * + * int64 offset = 2; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000002); + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java new file mode 100644 index 000000000000..a77a3b1920d6 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java @@ -0,0 +1,71 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * <pre>
+   * Required. Stream to read rows from.
+   * </pre>
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The readStream. + */ + java.lang.String getReadStream(); + + /** + * + * + *
+   * <pre>
+   * Required. Stream to read rows from.
+   * </pre>
+ * + * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for readStream. + */ + com.google.protobuf.ByteString getReadStreamBytes(); + + /** + * + * + *
+   * <pre>
+   * The offset requested must be less than the last row read from Read.
+   * Requesting a larger offset is undefined. If not specified, start reading
+   * from offset zero.
+   * </pre>
+ * + * int64 offset = 2; + * + * @return The offset. + */ + long getOffset(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java new file mode 100644 index 000000000000..20f56e562655 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java @@ -0,0 +1,2582 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * <pre>
+ * Response from calling `ReadRows` may include row data, progress and
+ * throttling information.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsResponse} + */ +public final class ReadRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + ReadRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRowsResponse.newBuilder() to construct. + private ReadRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.Builder.class); + } + + private int bitField0_; + private int rowsCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rows_; + + public enum RowsCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_ROWS(3), + ARROW_RECORD_BATCH(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 3: + return AVRO_ROWS; + case 4: + return ARROW_RECORD_BATCH; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + private int schemaCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(7), + ARROW_SCHEMA(8), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 7: + return AVRO_SCHEMA; + case 8: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int AVRO_ROWS_FIELD_NUMBER = 3; + + /** + * + * + *
+   * <pre>
+   * Serialized row data in AVRO format.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + @java.lang.Override + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + + /** + * + * + *
+   * <pre>
+   * Serialized row data in AVRO format.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getAvroRows() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + + /** + * + * + *
+   * <pre>
+   * Serialized row data in AVRO format.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder getAvroRowsOrBuilder() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + + public static final int ARROW_RECORD_BATCH_FIELD_NUMBER = 4; + + /** + * + * + *
+   * <pre>
+   * Serialized row data in Arrow RecordBatch format.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + @java.lang.Override + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + + /** + * + * + *
+   * <pre>
+   * Serialized row data in Arrow RecordBatch format.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getArrowRecordBatch() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + + /** + * + * + *
+   * <pre>
+   * Serialized row data in Arrow RecordBatch format.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 6; + private long rowCount_ = 0L; + + /** + * + * + *
+   * <pre>
+   * Number of serialized rows in the rows block.
+   * </pre>
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + public static final int STATS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.StreamStats stats_; + + /** + * + * + *
+   * <pre>
+   * Statistics for the stream.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + @java.lang.Override + public boolean hasStats() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * <pre>
+   * Statistics for the stream.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return The stats. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getStats() { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } + + /** + * + * + *
+   * <pre>
+   * Statistics for the stream.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder getStatsOrBuilder() { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } + + public static final int THROTTLE_STATE_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1beta2.ThrottleState throttleState_; + + /** + * + * + *
+   * <pre>
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + @java.lang.Override + public boolean hasThrottleState() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * <pre>
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState() { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } + + /** + * + * + *
+   * <pre>
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder + getThrottleStateOrBuilder() { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 7; + + /** + * + * + *
+   * <pre>
+   * Output only. Avro schema.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + + /** + * + * + *
+   * <pre>
+   * Output only. Avro schema.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * <pre>
+   * Output only. Avro schema.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 8; + + /** + * + * + *
+   * <pre>
+   * Output only. Arrow schema.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + + /** + * + * + *
+   * <pre>
+   * Output only. Arrow schema.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * <pre>
+   * Output only. Arrow schema.
+   * </pre>
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStats()); + } + if (rowsCase_ == 3) { + output.writeMessage(3, (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_); + } + if (rowsCase_ == 4) { + output.writeMessage(4, (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getThrottleState()); + } + if (rowCount_ != 0L) { + output.writeInt64(6, rowCount_); + } + if (schemaCase_ == 7) { + output.writeMessage(7, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + output.writeMessage(8, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStats()); + } + if (rowsCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getThrottleState()); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, rowCount_); + } + if (schemaCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse other = + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (hasStats() != other.hasStats()) return false; + if (hasStats()) { + if (!getStats().equals(other.getStats())) return false; + } + if (hasThrottleState() != other.hasThrottleState()) return false; + if (hasThrottleState()) { + 
if (!getThrottleState().equals(other.getThrottleState())) return false; + } + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 3: + if (!getAvroRows().equals(other.getAvroRows())) return false; + break; + case 4: + if (!getArrowRecordBatch().equals(other.getArrowRecordBatch())) return false; + break; + case 0: + default: + } + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 7: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 8: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + if (hasStats()) { + hash = (37 * hash) + STATS_FIELD_NUMBER; + hash = (53 * hash) + getStats().hashCode(); + } + if (hasThrottleState()) { + hash = (37 * hash) + THROTTLE_STATE_FIELD_NUMBER; + hash = (53 * hash) + getThrottleState().hashCode(); + } + switch (rowsCase_) { + case 3: + hash = (37 * hash) + AVRO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getAvroRows().hashCode(); + break; + case 4: + hash = (37 * hash) + ARROW_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getArrowRecordBatch().hashCode(); + break; + case 0: + default: + } + switch (schemaCase_) { + case 7: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 8: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * <pre>
+   * Response from calling `ReadRows` may include row data, progress and
+   * throttling information.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStatsFieldBuilder(); + getThrottleStateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (avroRowsBuilder_ != null) { + avroRowsBuilder_.clear(); + } + if (arrowRecordBatchBuilder_ != null) { + arrowRecordBatchBuilder_.clear(); + } + rowCount_ = 0L; + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + throttleState_ = null; + if (throttleStateBuilder_ != null) { + throttleStateBuilder_.dispose(); + throttleStateBuilder_ = null; + } + if (avroSchemaBuilder_ != null) { + avroSchemaBuilder_.clear(); + } + if (arrowSchemaBuilder_ != null) { + arrowSchemaBuilder_.clear(); + } + rowsCase_ = 0; + rows_ = null; + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse build() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse result = + new com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse result) { + int from_bitField0_ = 
bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.rowCount_ = rowCount_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.stats_ = statsBuilder_ == null ? stats_ : statsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.throttleState_ = + throttleStateBuilder_ == null ? throttleState_ : throttleStateBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse result) { + result.rowsCase_ = rowsCase_; + result.rows_ = this.rows_; + if (rowsCase_ == 3 && avroRowsBuilder_ != null) { + result.rows_ = avroRowsBuilder_.build(); + } + if (rowsCase_ == 4 && arrowRecordBatchBuilder_ != null) { + result.rows_ = arrowRecordBatchBuilder_.build(); + } + result.schemaCase_ = schemaCase_; + result.schema_ = this.schema_; + if (schemaCase_ == 7 && avroSchemaBuilder_ != null) { + result.schema_ = avroSchemaBuilder_.build(); + } + if (schemaCase_ == 8 && arrowSchemaBuilder_ != null) { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.getDefaultInstance()) + return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + if (other.hasStats()) { + mergeStats(other.getStats()); + } + if (other.hasThrottleState()) { + mergeThrottleState(other.getThrottleState()); + } + switch (other.getRowsCase()) { + case AVRO_ROWS: + { + mergeAvroRows(other.getAvroRows()); + break; + } + case ARROW_RECORD_BATCH: + { + mergeArrowRecordBatch(other.getArrowRecordBatch()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + 
@java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage(getStatsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 18 + case 26: + { + input.readMessage(getAvroRowsFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + getArrowRecordBatchFieldBuilder().getBuilder(), extensionRegistry); + rowsCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage(getThrottleStateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + rowCount_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 48 + case 58: + { + input.readMessage(getAvroSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage(getArrowSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 8; + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroRows, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder> + avroRowsBuilder_; + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + @java.lang.Override + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } else { + if (rowsCase_ == 3) { + return avroRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder setAvroRows(com.google.cloud.bigquery.storage.v1beta2.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + avroRowsBuilder_.setMessage(value); + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder setAvroRows( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder builderForValue) { + if (avroRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + avroRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder mergeAvroRows(com.google.cloud.bigquery.storage.v1beta2.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3 + && rows_ != com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta2.AvroRows.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 3) { + avroRowsBuilder_.mergeFrom(value); + } else { + avroRowsBuilder_.setMessage(value); + } + } + rowsCase_ = 3; + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder clearAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + } + avroRowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder getAvroRowsBuilder() { + return getAvroRowsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder getAvroRowsOrBuilder() { + if ((rowsCase_ == 3) && (avroRowsBuilder_ != null)) { + return avroRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in AVRO format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroRows, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder> + getAvroRowsFieldBuilder() { + if (avroRowsBuilder_ == null) { + if (!(rowsCase_ == 3)) { + rows_ = com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + avroRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroRows, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 3; + onChanged(); + return avroRowsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder> + arrowRecordBatchBuilder_; + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + @java.lang.Override + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return arrowRecordBatchBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder builderForValue) { + if (arrowRecordBatchBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder mergeArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + arrowRecordBatchBuilder_.mergeFrom(value); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + } + rowsCase_ = 4; + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder clearArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + arrowRecordBatchBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder + getArrowRecordBatchBuilder() { + return getArrowRecordBatchFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if ((rowsCase_ == 4) && (arrowRecordBatchBuilder_ != null)) { + return arrowRecordBatchBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + } + + /** + * + * + *
+     * <pre>
+     * Serialized row data in Arrow RecordBatch format.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder> + getArrowRecordBatchFieldBuilder() { + if (arrowRecordBatchBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + arrowRecordBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + return arrowRecordBatchBuilder_; + } + + private long rowCount_; + + /** + * + * + *
+     * <pre>
+     * Number of serialized rows in the rows block.
+     * </pre>
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + @java.lang.Override + public long getRowCount() { + return rowCount_; + } + + /** + * + * + *
+     * <pre>
+     * Number of serialized rows in the rows block.
+     * </pre>
+ * + * int64 row_count = 6; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Number of serialized rows in the rows block.
+     * </pre>
+ * + * int64 row_count = 6; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + bitField0_ = (bitField0_ & ~0x00000004); + rowCount_ = 0L; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.StreamStats stats_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder> + statsBuilder_; + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + public boolean hasStats() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return The stats. + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getStats() { + if (statsBuilder_ == null) { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } else { + return statsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder setStats(com.google.cloud.bigquery.storage.v1beta2.StreamStats value) { + if (statsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stats_ = value; + } else { + statsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder setStats( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder builderForValue) { + if (statsBuilder_ == null) { + stats_ = builderForValue.build(); + } else { + statsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder mergeStats(com.google.cloud.bigquery.storage.v1beta2.StreamStats value) { + if (statsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && stats_ != null + && stats_ + != com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance()) { + getStatsBuilder().mergeFrom(value); + } else { + stats_ = value; + } + } else { + statsBuilder_.mergeFrom(value); + } + if (stats_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder clearStats() { + bitField0_ = (bitField0_ & ~0x00000008); + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder getStatsBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder getStatsOrBuilder() { + if (statsBuilder_ != null) { + return statsBuilder_.getMessageOrBuilder(); + } else { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } + } + + /** + * + * + *
+     * <pre>
+     * Statistics for the stream.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder> + getStatsFieldBuilder() { + if (statsBuilder_ == null) { + statsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder>( + getStats(), getParentForChildren(), isClean()); + stats_ = null; + } + return statsBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ThrottleState throttleState_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ThrottleState, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder> + throttleStateBuilder_; + + /** + * + * + *
+     * <pre>
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + public boolean hasThrottleState() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * <pre>
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * </pre>
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState() { + if (throttleStateBuilder_ == null) { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } else { + return throttleStateBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder setThrottleState(com.google.cloud.bigquery.storage.v1beta2.ThrottleState value) { + if (throttleStateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + throttleState_ = value; + } else { + throttleStateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder setThrottleState( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder builderForValue) { + if (throttleStateBuilder_ == null) { + throttleState_ = builderForValue.build(); + } else { + throttleStateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder mergeThrottleState( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState value) { + if (throttleStateBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && throttleState_ != null + && throttleState_ + != com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance()) { + getThrottleStateBuilder().mergeFrom(value); + } else { + throttleState_ = value; + } + } else { + throttleStateBuilder_.mergeFrom(value); + } + if (throttleState_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder clearThrottleState() { + bitField0_ = (bitField0_ & ~0x00000010); + throttleState_ = null; + if (throttleStateBuilder_ != null) { + throttleStateBuilder_.dispose(); + throttleStateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder + getThrottleStateBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getThrottleStateFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder + getThrottleStateOrBuilder() { + if (throttleStateBuilder_ != null) { + return throttleStateBuilder_.getMessageOrBuilder(); + } else { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } + } + + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ThrottleState, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder> + getThrottleStateFieldBuilder() { + if (throttleStateBuilder_ == null) { + throttleStateBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ThrottleState, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder>( + getThrottleState(), getParentForChildren(), isClean()); + throttleState_ = null; + } + return throttleStateBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + avroSchemaBuilder_; + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 7) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 7) { + avroSchemaBuilder_.mergeFrom(value); + } else { + avroSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 7; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if ((schemaCase_ == 7) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 7)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 7; + onChanged(); + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 8) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 8) { + arrowSchemaBuilder_.mergeFrom(value); + } else { + arrowSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 8; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if ((schemaCase_ == 8) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 8)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 8; + onChanged(); + return arrowSchemaBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java new file mode 100644 index 000000000000..f313414e5a71 --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java @@ -0,0 +1,281 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Serialized row data in AVRO format.
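+   *
+   * Illustrative sketch (editorial note, not generated text): the payload in
+   * getAvroRows().getSerializedBinaryRows() is Avro binary data; it can be
+   * decoded against the JSON schema string from getAvroSchema().getSchema().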
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + boolean hasAvroRows(); + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + com.google.cloud.bigquery.storage.v1beta2.AvroRows getAvroRows(); + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder getAvroRowsOrBuilder(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
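+   *
+   * Illustrative sketch (editorial note, not generated text):
+   * getArrowRecordBatch().getSerializedRecordBatch() holds an Arrow IPC
+   * RecordBatch message, deserialized together with the bytes from
+   * getArrowSchema().getSerializedSchema().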
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + boolean hasArrowRecordBatch(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getArrowRecordBatch(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder(); + + /** + * + * + *
+   * Number of serialized rows in the rows block.
+   * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + long getRowCount(); + + /** + * + * + *
+   * Statistics for the stream.
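+   *
+   * Illustrative sketch (editorial note, not generated text; assumes the
+   * StreamStats.Progress shape): getStats().getProgress().getAtResponseEnd()
+   * would report the fraction of the stream consumed so far.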
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + boolean hasStats(); + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return The stats. + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStats getStats(); + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
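+   *
+   * Illustrative sketch (editorial note, not generated text; assumes the
+   * throttle_percent field): a reader might back off while hasThrottleState()
+   * is true and getThrottleState().getThrottlePercent() is greater than zero.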
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + boolean hasThrottleState(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder getThrottleStateOrBuilder(); + + /** + * + * + *
+   * Output only. Avro schema.
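+   *
+   * Illustrative sketch (editorial note, not generated text): avro_schema and
+   * arrow_schema share the "schema" oneof, so check getSchemaCase() (or
+   * hasAvroSchema()) before calling getAvroSchema().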
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder(); + + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.RowsCase getRowsCase(); + + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.SchemaCase getSchemaCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java new file mode 100644 index 000000000000..325967406d0f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java @@ -0,0 +1,5734 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Information about the ReadSession.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession} + */ +public final class ReadSession extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadSession) + ReadSessionOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadSession.newBuilder() to construct. + private ReadSession(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadSession() { + name_ = ""; + dataFormat_ = 0; + table_ = ""; + streams_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadSession(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder.class); + } + + public interface TableModifiersOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
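+     *
+     * Illustrative sketch (editorial note, not generated text): to pin reads
+     * to a fixed instant, a caller could use
+     *   TableModifiers.newBuilder()
+     *       .setSnapshotTime(Timestamp.newBuilder().setSeconds(1577836800L))
+     *       .build();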
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + boolean hasSnapshotTime(); + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + com.google.protobuf.Timestamp getSnapshotTime(); + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder(); + } + + /** + * + * + *
+   * Additional attributes when reading a table.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers} + */ + public static final class TableModifiers extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + TableModifiersOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableModifiers.newBuilder() to construct. + private TableModifiers(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableModifiers() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableModifiers(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder.class); + } + + private int bitField0_; + public static final int SNAPSHOT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp snapshotTime_; + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + @java.lang.Override + public boolean hasSnapshotTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getSnapshotTime() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getSnapshotTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSnapshotTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers other = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) obj; + + if (hasSnapshotTime() != other.hasSnapshotTime()) return false; + if (hasSnapshotTime()) { + if (!getSnapshotTime().equals(other.getSnapshotTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSnapshotTime()) { + hash = (37 * hash) + SNAPSHOT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Additional attributes when reading a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSnapshotTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + snapshotTime_ = null; + if (snapshotTimeBuilder_ != null) { + snapshotTimeBuilder_.dispose(); + snapshotTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers build() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers result = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.snapshotTime_ = + snapshotTimeBuilder_ == null ? 
snapshotTime_ : snapshotTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance()) return this; + if (other.hasSnapshotTime()) { + mergeSnapshotTime(other.getSnapshotTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getSnapshotTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp snapshotTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + snapshotTimeBuilder_; + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + public boolean hasSnapshotTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + public com.google.protobuf.Timestamp getSnapshotTime() { + if (snapshotTimeBuilder_ == null) { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } else { + return snapshotTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshotTime_ = value; + } else { + snapshotTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (snapshotTimeBuilder_ == null) { + snapshotTime_ = builderForValue.build(); + } else { + snapshotTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder mergeSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && snapshotTime_ != null + && snapshotTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getSnapshotTimeBuilder().mergeFrom(value); + } else { + snapshotTime_ = value; + } + } else { + snapshotTimeBuilder_.mergeFrom(value); + } + if (snapshotTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder clearSnapshotTime() { + bitField0_ = (bitField0_ & ~0x00000001); + snapshotTime_ = null; + if (snapshotTimeBuilder_ != null) { + snapshotTimeBuilder_.dispose(); + snapshotTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getSnapshotTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSnapshotTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + if (snapshotTimeBuilder_ != null) { + return snapshotTimeBuilder_.getMessageOrBuilder(); + } else { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + } + + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getSnapshotTimeFieldBuilder() { + if (snapshotTimeBuilder_ == null) { + snapshotTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSnapshotTime(), getParentForChildren(), isClean()); + snapshotTime_ = null; + } + return snapshotTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableModifiers parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TableReadOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
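+     *
+     * Illustrative example (editorial note, not generated text): selecting
+     * "struct_a" also reads every sub-field of struct_a, and listing "col_b"
+     * before "col_a" does not force col_b to appear first in the output.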
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + java.util.List getSelectedFieldsList(); + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + int getSelectedFieldsCount(); + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + java.lang.String getSelectedFields(int index); + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + com.google.protobuf.ByteString getSelectedFieldsBytes(int index); + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + java.lang.String getRowRestriction(); + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + com.google.protobuf.ByteString getRowRestrictionBytes(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the arrowSerializationOptions field is set. + */ + boolean hasArrowSerializationOptions(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The arrowSerializationOptions. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + getArrowSerializationOptions(); + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder + getArrowSerializationOptionsOrBuilder(); + } + + /** + * + * + *
+   * Options dictating how we read a table.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions} + */ + public static final class TableReadOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + TableReadOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableReadOptions.newBuilder() to construct. + private TableReadOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableReadOptions() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + rowRestriction_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableReadOptions(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder.class); + } + + private int bitField0_; + public static final int SELECTED_FIELDS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList selectedFields_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
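+     *
+     * For example (hypothetical schema; "builder" is a TableReadOptions.Builder),
+     * selecting a nested RECORD column also selects every sub-field under it:
+     *
+     *   builder.addSelectedFields("address"); // also reads address.city, address.zip, ...
+     *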
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + return selectedFields_; + } + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + public static final int ROW_RESTRICTION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object rowRestriction_ = ""; + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
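+     *
+     * A short sketch (the restriction text is a placeholder; it is plain SQL
+     * text evaluated by the service, not parsed by this client):
+     *
+     *   ReadSession.TableReadOptions.newBuilder()
+     *       .setRowRestriction("state = 'WA'")
+     *       .build();
+     *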
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + @java.lang.Override + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } + } + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Aggregates are not supported.
+     *
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     *
+     * Restricted to a maximum length of 1 MB.
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARROW_SERIALIZATION_OPTIONS_FIELD_NUMBER = 3; + private com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + arrowSerializationOptions_; + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
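+     *
+     * A hedged sketch; these options are specific to ARROW output, and the
+     * default instance is used here for brevity:
+     *
+     *   ReadSession.TableReadOptions.newBuilder()
+     *       .setArrowSerializationOptions(
+     *           ArrowSerializationOptions.newBuilder().build())
+     *       .build();
+     *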
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the arrowSerializationOptions field is set. + */ + @java.lang.Override + public boolean hasArrowSerializationOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The arrowSerializationOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + getArrowSerializationOptions() { + return arrowSerializationOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.getDefaultInstance() + : arrowSerializationOptions_; + } + + /** + * + * + *
+     * Optional. Options specific to the Apache Arrow output format.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder + getArrowSerializationOptionsOrBuilder() { + return arrowSerializationOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.getDefaultInstance() + : arrowSerializationOptions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < selectedFields_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, selectedFields_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(rowRestriction_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, rowRestriction_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getArrowSerializationOptions()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < selectedFields_.size(); i++) { + dataSize += computeStringSizeNoTag(selectedFields_.getRaw(i)); + } + size += dataSize; + size += 1 * getSelectedFieldsList().size(); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(rowRestriction_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, rowRestriction_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, getArrowSerializationOptions()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions other = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) obj; + + if (!getSelectedFieldsList().equals(other.getSelectedFieldsList())) return false; + if (!getRowRestriction().equals(other.getRowRestriction())) return false; + if (hasArrowSerializationOptions() != other.hasArrowSerializationOptions()) return false; + if (hasArrowSerializationOptions()) { + if (!getArrowSerializationOptions().equals(other.getArrowSerializationOptions())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSelectedFieldsCount() > 0) { + hash = (37 * hash) + SELECTED_FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getSelectedFieldsList().hashCode(); + } + hash = (37 * hash) + ROW_RESTRICTION_FIELD_NUMBER; + hash = (53 * hash) + getRowRestriction().hashCode(); + if (hasArrowSerializationOptions()) { + hash = (37 * hash) + 
ARROW_SERIALIZATION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getArrowSerializationOptions().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+     * Options dictating how we read a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getArrowSerializationOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + rowRestriction_ = ""; + arrowSerializationOptions_ = null; + if (arrowSerializationOptionsBuilder_ != null) { + arrowSerializationOptionsBuilder_.dispose(); + arrowSerializationOptionsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions build() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions result = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + selectedFields_.makeImmutable(); + result.selectedFields_ = selectedFields_; + } + if 
(((from_bitField0_ & 0x00000002) != 0)) { + result.rowRestriction_ = rowRestriction_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.arrowSerializationOptions_ = + arrowSerializationOptionsBuilder_ == null + ? arrowSerializationOptions_ + : arrowSerializationOptionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance()) return this; + if (!other.selectedFields_.isEmpty()) { + if (selectedFields_.isEmpty()) { + selectedFields_ = other.selectedFields_; + bitField0_ |= 0x00000001; + } else { + ensureSelectedFieldsIsMutable(); + selectedFields_.addAll(other.selectedFields_); + } + onChanged(); + } + if (!other.getRowRestriction().isEmpty()) { + rowRestriction_ = other.rowRestriction_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasArrowSerializationOptions()) { + mergeArrowSerializationOptions(other.getArrowSerializationOptions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(s); + break; + } // case 10 + case 18: + { + rowRestriction_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + getArrowSerializationOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList selectedFields_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSelectedFieldsIsMutable() { + if (!selectedFields_.isModifiable()) { + selectedFields_ = new com.google.protobuf.LazyStringArrayList(selectedFields_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + selectedFields_.makeImmutable(); + return selectedFields_; + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index to set the value at. + * @param value The selectedFields to set. + * @return This builder for chaining. + */ + public Builder setSelectedFields(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFields(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param values The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addAllSelectedFields(java.lang.Iterable values) { + ensureSelectedFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, selectedFields_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return This builder for chaining. + */ + public Builder clearSelectedFields() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The bytes of the selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object rowRestriction_ = ""; + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @param value The rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestriction(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + rowRestriction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @return This builder for chaining. + */ + public Builder clearRowRestriction() { + rowRestriction_ = getDefaultInstance().getRowRestriction(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Aggregates are not supported.
+       *
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       *
+       * Restricted to a maximum length of 1 MB.
+       * 
+ * + * string row_restriction = 2; + * + * @param value The bytes for rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestrictionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + rowRestriction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + arrowSerializationOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder> + arrowSerializationOptionsBuilder_; + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the arrowSerializationOptions field is set. + */ + public boolean hasArrowSerializationOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The arrowSerializationOptions. + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + getArrowSerializationOptions() { + if (arrowSerializationOptionsBuilder_ == null) { + return arrowSerializationOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + .getDefaultInstance() + : arrowSerializationOptions_; + } else { + return arrowSerializationOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setArrowSerializationOptions( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions value) { + if (arrowSerializationOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + arrowSerializationOptions_ = value; + } else { + arrowSerializationOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setArrowSerializationOptions( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder + builderForValue) { + if (arrowSerializationOptionsBuilder_ == null) { + arrowSerializationOptions_ = builderForValue.build(); + } else { + arrowSerializationOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeArrowSerializationOptions( + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions value) { + if (arrowSerializationOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && arrowSerializationOptions_ != null + && arrowSerializationOptions_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + .getDefaultInstance()) { + getArrowSerializationOptionsBuilder().mergeFrom(value); + } else { + arrowSerializationOptions_ = value; + } + } else { + arrowSerializationOptionsBuilder_.mergeFrom(value); + } + if (arrowSerializationOptions_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearArrowSerializationOptions() { + bitField0_ = (bitField0_ & ~0x00000004); + arrowSerializationOptions_ = null; + if (arrowSerializationOptionsBuilder_ != null) { + arrowSerializationOptionsBuilder_.dispose(); + arrowSerializationOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder + getArrowSerializationOptionsBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getArrowSerializationOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder + getArrowSerializationOptionsOrBuilder() { + if (arrowSerializationOptionsBuilder_ != null) { + return arrowSerializationOptionsBuilder_.getMessageOrBuilder(); + } else { + return arrowSerializationOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions + .getDefaultInstance() + : arrowSerializationOptions_; + } + } + + /** + * + * + *
+       * Optional. Options specific to the Apache Arrow output format.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions arrow_serialization_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder> + getArrowSerializationOptionsFieldBuilder() { + if (arrowSerializationOptionsBuilder_ == null) { + arrowSerializationOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsOrBuilder>( + getArrowSerializationOptions(), getParentForChildren(), isClean()); + arrowSerializationOptions_ = null; + } + return arrowSerializationOptionsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableReadOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int schemaCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(4), + ARROW_SCHEMA(5), + 
SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 4: + return AVRO_SCHEMA; + case 5: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
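+   *
+   * For example, a returned name might look like the following (all
+   * identifiers are placeholders; the session id is service-assigned):
+   *
+   *   projects/my-project/locations/us/sessions/{session_id}
+   *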
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time, subsequent
+   * requests to read this Session will return errors. The expire_time is
+   * automatically assigned and currently cannot be specified or updated.
+   * 
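+   *
+   * A sketch of converting the expiry to java.time (assumes "session" was
+   * returned by the service, so the field is set):
+   *
+   *   if (session.hasExpireTime()) {
+   *     java.time.Instant expiry =
+   *         java.time.Instant.ofEpochSecond(
+   *             session.getExpireTime().getSeconds(),
+   *             session.getExpireTime().getNanos());
+   *   }
+   *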
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time, subsequent
+   * requests to read this Session will return errors. The expire_time is
+   * automatically assigned and currently cannot be specified or updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time, subsequent
+   * requests to read this Session will return errors. The expire_time is
+   * automatically assigned and currently cannot be specified or updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int DATA_FORMAT_FIELD_NUMBER = 3; + private int dataFormat_ = 0; + + /** + * + * + *
+   * Immutable. Data format of the output data.
+   * 
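+   *
+   * getDataFormat() falls back to UNRECOGNIZED for enum numbers this client
+   * version does not know; the raw wire value is still available:
+   *
+   *   if (session.getDataFormat() == DataFormat.UNRECOGNIZED) {
+   *     int rawFormat = session.getDataFormatValue();
+   *   }
+   *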
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + @java.lang.Override + public int getDataFormatValue() { + return dataFormat_; + } + + /** + * + * + *
+   * Immutable. Data format of the output data.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.DataFormat getDataFormat() { + com.google.cloud.bigquery.storage.v1beta2.DataFormat result = + com.google.cloud.bigquery.storage.v1beta2.DataFormat.forNumber(dataFormat_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.DataFormat.UNRECOGNIZED + : result; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 4; + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
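+   *
+   * avro_schema and arrow_schema are members of the same "schema" oneof; a
+   * sketch of dispatching on it:
+   *
+   *   switch (session.getSchemaCase()) {
+   *     case AVRO_SCHEMA:
+   *       AvroSchema avro = session.getAvroSchema(); // use the Avro schema
+   *       break;
+   *     case ARROW_SCHEMA:
+   *       ArrowSchema arrow = session.getArrowSchema(); // use the Arrow schema
+   *       break;
+   *     default:
+   *       break; // SCHEMA_NOT_SET
+   *   }
+   *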
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 4; + } + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 5; + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 5; + } + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + public static final int TABLE_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+   * 
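+   *
+   * For example (placeholder identifiers):
+   *
+   *   projects/my-project/datasets/my_dataset/tables/my_table
+   *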
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+   * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TABLE_MODIFIERS_FIELD_NUMBER = 7; + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers tableModifiers_; + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + @java.lang.Override + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers getTableModifiers() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } + + public static final int READ_OPTIONS_FIELD_NUMBER = 8; + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions readOptions_; + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
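+   *
+   * A hedged end-to-end sketch; BigQueryReadClient and
+   * CreateReadSessionRequest live elsewhere in this package, and "client",
+   * "tableName", and "options" are placeholders:
+   *
+   *   ReadSession session =
+   *       client.createReadSession(
+   *           CreateReadSessionRequest.newBuilder()
+   *               .setParent("projects/my-project")
+   *               .setReadSession(
+   *                   ReadSession.newBuilder()
+   *                       .setTable(tableName)
+   *                       .setDataFormat(DataFormat.AVRO)
+   *                       .setReadOptions(options))
+   *               .setMaxStreamCount(1)
+   *               .build());
+   *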
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + @java.lang.Override + public boolean hasReadOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions getReadOptions() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + + public static final int STREAMS_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private java.util.List streams_; + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
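+   *
+   * A sketch of iterating the returned streams (each stream name is then
+   * used in a ReadRows call, not shown here):
+   *
+   *   for (ReadStream stream : session.getStreamsList()) {
+   *     String streamName = stream.getName();
+   *   }
+   *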
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List getStreamsList() { + return streams_; + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getStreamsOrBuilderList() { + return streams_; + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getStreamsCount() { + return streams_.size(); + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getStreams(int index) { + return streams_.get(index); + } + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method, which is not yet
+   * available, to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + return streams_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getExpireTime()); + } + if (dataFormat_ + != com.google.cloud.bigquery.storage.v1beta2.DataFormat.DATA_FORMAT_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, dataFormat_); + } + if (schemaCase_ == 4) { + output.writeMessage(4, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 5) { + output.writeMessage(5, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, table_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(7, getTableModifiers()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(8, getReadOptions()); + } + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(10, streams_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getExpireTime()); + } + if (dataFormat_ + != com.google.cloud.bigquery.storage.v1beta2.DataFormat.DATA_FORMAT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, dataFormat_); + } + if (schemaCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, table_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getTableModifiers()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getReadOptions()); + } + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, streams_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if 
(!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadSession other = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession) obj; + + if (!getName().equals(other.getName())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (dataFormat_ != other.dataFormat_) return false; + if (!getTable().equals(other.getTable())) return false; + if (hasTableModifiers() != other.hasTableModifiers()) return false; + if (hasTableModifiers()) { + if (!getTableModifiers().equals(other.getTableModifiers())) return false; + } + if (hasReadOptions() != other.hasReadOptions()) return false; + if (hasReadOptions()) { + if (!getReadOptions().equals(other.getReadOptions())) return false; + } + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 4: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 5: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (37 * hash) + DATA_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + dataFormat_; + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasTableModifiers()) { + hash = (37 * hash) + TABLE_MODIFIERS_FIELD_NUMBER; + hash = (53 * hash) + getTableModifiers().hashCode(); + } + if (hasReadOptions()) { + hash = (37 * hash) + READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReadOptions().hashCode(); + } + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + switch (schemaCase_) { + case 4: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 5: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadSession prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about the ReadSession.
+   * 
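+   *
+   * <p>A minimal construction sketch (the resource name and values are
+   * illustrative only):
+   *
+   * <pre>{@code
+   * ReadSession session =
+   *     ReadSession.newBuilder()
+   *         .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+   *         .setDataFormat(DataFormat.AVRO)
+   *         .build();
+   * }</pre>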
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadSession) + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadSession.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getExpireTimeFieldBuilder(); + getTableModifiersFieldBuilder(); + getReadOptionsFieldBuilder(); + getStreamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + dataFormat_ = 0; + if (avroSchemaBuilder_ != null) { + avroSchemaBuilder_.clear(); + } + if (arrowSchemaBuilder_ != null) { + arrowSchemaBuilder_.clear(); + } + table_ = ""; + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + readOptions_ = null; + if (readOptionsBuilder_ != null) { + readOptionsBuilder_.dispose(); + readOptionsBuilder_ = null; + } + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + } else { + streams_ = null; + streamsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession build() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession result = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + 
buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta2.ReadSession result) { + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000100); + } + result.streams_ = streams_; + } else { + result.streams_ = streamsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ReadSession result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.dataFormat_ = dataFormat_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.tableModifiers_ = + tableModifiersBuilder_ == null ? tableModifiers_ : tableModifiersBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.readOptions_ = + readOptionsBuilder_ == null ? readOptions_ : readOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.cloud.bigquery.storage.v1beta2.ReadSession result) { + result.schemaCase_ = schemaCase_; + result.schema_ = this.schema_; + if (schemaCase_ == 4 && avroSchemaBuilder_ != null) { + result.schema_ = avroSchemaBuilder_.build(); + } + if (schemaCase_ == 5 && arrowSchemaBuilder_ != null) { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadSession) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadSession other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (other.dataFormat_ != 0) { + 
setDataFormatValue(other.getDataFormatValue()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.hasTableModifiers()) { + mergeTableModifiers(other.getTableModifiers()); + } + if (other.hasReadOptions()) { + mergeReadOptions(other.getReadOptions()); + } + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000100); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000100); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + dataFormat_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + input.readMessage(getAvroSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage(getArrowSchemaFieldBuilder().getBuilder(), extensionRegistry); + schemaCase_ = 5; + break; + } // case 42 + case 50: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage(getTableModifiersFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + case 66: + { + input.readMessage(getReadOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 82: + { + com.google.cloud.bigquery.storage.v1beta2.ReadStream m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.parser(), + extensionRegistry); + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(m); + } else { + streamsBuilder_.addMessage(m); + } + break; + } // case 82 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + 
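+    // Note on the "schema" oneof handled above: a session returned by the
+    // server carries either an Avro or an Arrow schema, depending on
+    // data_format. A caller would typically switch on the oneof case rather
+    // than probing both accessors. Sketch (handleAvro/handleArrow are
+    // hypothetical helpers):
+    //
+    //   switch (session.getSchemaCase()) {
+    //     case AVRO_SCHEMA:    handleAvro(session.getAvroSchema());   break;
+    //     case ARROW_SCHEMA:   handleArrow(session.getArrowSchema()); break;
+    //     case SCHEMA_NOT_SET: break;
+    //   }
+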
private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000002); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time, subsequent
+     * requests to read this Session will return errors. The expire_time is
+     * automatically assigned and currently cannot be specified or updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private int dataFormat_ = 0; + + /** + * + * + *
+     * Immutable. Data format of the output data.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + @java.lang.Override + public int getDataFormatValue() { + return dataFormat_; + } + + /** + * + * + *
+     * Immutable. Data format of the output data.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for dataFormat to set. + * @return This builder for chaining. + */ + public Builder setDataFormatValue(int value) { + dataFormat_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Data format of the output data.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.DataFormat getDataFormat() { + com.google.cloud.bigquery.storage.v1beta2.DataFormat result = + com.google.cloud.bigquery.storage.v1beta2.DataFormat.forNumber(dataFormat_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.DataFormat.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Immutable. Data format of the output data.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The dataFormat to set. + * @return This builder for chaining. + */ + public Builder setDataFormat(com.google.cloud.bigquery.storage.v1beta2.DataFormat value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + dataFormat_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Data format of the output data.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearDataFormat() { + bitField0_ = (bitField0_ & ~0x00000004); + dataFormat_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + avroSchemaBuilder_; + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 4; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 4) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 4; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 4; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 4) { + avroSchemaBuilder_.mergeFrom(value); + } else { + avroSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 4; + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 4) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if ((schemaCase_ == 4) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 4)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 4; + onChanged(); + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 5; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 5) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 5) { + arrowSchemaBuilder_.mergeFrom(value); + } else { + arrowSchemaBuilder_.setMessage(value); + } + } + schemaCase_ = 5; + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if ((schemaCase_ == 5) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 5)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 5; + onChanged(); + return arrowSchemaBuilder_; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers tableModifiers_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder> + tableModifiersBuilder_; + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + public boolean hasTableModifiers() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getTableModifiers() { + if (tableModifiersBuilder_ == null) { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } else { + return tableModifiersBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
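+     *
+     * <p>Sketch (the snapshot value is illustrative), e.g. pinning a table
+     * snapshot time:
+     *
+     * <pre>{@code
+     * builder.setTableModifiers(
+     *     ReadSession.TableModifiers.newBuilder()
+     *         .setSnapshotTime(
+     *             com.google.protobuf.Timestamp.newBuilder().setSeconds(1234567890L).build())
+     *         .build());
+     * }</pre>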
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableModifiers_ = value; + } else { + tableModifiersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder + builderForValue) { + if (tableModifiersBuilder_ == null) { + tableModifiers_ = builderForValue.build(); + } else { + tableModifiersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeTableModifiers( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && tableModifiers_ != null + && tableModifiers_ + != com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance()) { + getTableModifiersBuilder().mergeFrom(value); + } else { + tableModifiers_ = value; + } + } else { + tableModifiersBuilder_.mergeFrom(value); + } + if (tableModifiers_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearTableModifiers() { + bitField0_ = (bitField0_ & ~0x00000040); + tableModifiers_ = null; + if (tableModifiersBuilder_ != null) { + tableModifiersBuilder_.dispose(); + tableModifiersBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder + getTableModifiersBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getTableModifiersFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + if (tableModifiersBuilder_ != null) { + return tableModifiersBuilder_.getMessageOrBuilder(); + } else { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + } + + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder> + getTableModifiersFieldBuilder() { + if (tableModifiersBuilder_ == null) { + tableModifiersBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder>( + getTableModifiers(), getParentForChildren(), isClean()); + tableModifiers_ = null; + } + return tableModifiersBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions readOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder> + readOptionsBuilder_; + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + public boolean hasReadOptions() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions getReadOptions() { + if (readOptionsBuilder_ == null) { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } else { + return readOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
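+     *
+     * <p>Sketch (the column name and filter are illustrative):
+     *
+     * <pre>{@code
+     * builder.setReadOptions(
+     *     ReadSession.TableReadOptions.newBuilder()
+     *         .addSelectedFields("col_a")
+     *         .setRowRestriction("col_a > 0")
+     *         .build());
+     * }</pre>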
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readOptions_ = value; + } else { + readOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + builderForValue) { + if (readOptionsBuilder_ == null) { + readOptions_ = builderForValue.build(); + } else { + readOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeReadOptions( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && readOptions_ != null + && readOptions_ + != com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance()) { + getReadOptionsBuilder().mergeFrom(value); + } else { + readOptions_ = value; + } + } else { + readOptionsBuilder_.mergeFrom(value); + } + if (readOptions_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearReadOptions() { + bitField0_ = (bitField0_ & ~0x00000080); + readOptions_ = null; + if (readOptionsBuilder_ != null) { + readOptionsBuilder_.dispose(); + readOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + getReadOptionsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return getReadOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + if (readOptionsBuilder_ != null) { + return readOptionsBuilder_.getMessageOrBuilder(); + } else { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + } + + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder> + getReadOptionsFieldBuilder() { + if (readOptionsBuilder_ == null) { + readOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder>( + getReadOptions(), getParentForChildren(), isClean()); + readOptions_ = null; + } + return readOptionsBuilder_; + } + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000100) != 0)) { + streams_ = + new java.util.ArrayList(streams_); + bitField0_ |= 0x00000100; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + streamsBuilder_; + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllStreams( + java.lang.Iterable values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder getStreamsBuilder( + int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
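+     *
+     * A minimal sketch (illustrative; this list is normally populated by the
+     * server, so builder-side mutation is mostly useful for test fixtures):
+     *
+     *   ReadSession.Builder b = ReadSession.newBuilder();
+     *   b.addStreamsBuilder().setName("projects/p/locations/l/sessions/s/streams/s0");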
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder addStreamsBuilder( + int index) { + return getStreamsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()); + } + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     *
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder>( + streams_, ((bitField0_ & 0x00000100) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadSession) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadSession) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadSession DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadSession(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadSession parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java new file mode 100644 index 000000000000..04b5e1daabb2 --- /dev/null +++ 
b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java @@ -0,0 +1,429 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadSessionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadSession) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time, subsequent
+   * requests to read this session will return errors. The expire_time is
+   * automatically assigned and currently cannot be specified or updated.
+   * 
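+   *
+   * A minimal presence-check sketch (illustrative; {@code session} stands
+   * for a ReadSession instance returned by the service):
+   *
+   *   if (session.hasExpireTime()) {
+   *     com.google.protobuf.Timestamp expiry = session.getExpireTime();
+   *   }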
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time, subsequent
+   * requests to read this session will return errors. The expire_time is
+   * automatically assigned and currently cannot be specified or updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time, subsequent
+   * requests to read this session will return errors. The expire_time is
+   * automatically assigned and currently cannot be specified or updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
+   * Immutable. Data format of the output data.
+   * 
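+   *
+   * A minimal sketch contrasting the two accessors (illustrative;
+   * {@code session} stands for a ReadSession instance):
+   *
+   *   DataFormat format = session.getDataFormat();   // enum view
+   *   int wireValue = session.getDataFormatValue();  // raw wire value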
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + int getDataFormatValue(); + + /** + * + * + *
+   * Immutable. Data format of the output data.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + com.google.cloud.bigquery.storage.v1beta2.DataFormat getDataFormat(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder(); + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+   * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
+   * Immutable. Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+   * 
+ * + * + * string table = 6 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + boolean hasTableModifiers(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers getTableModifiers(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
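+   *
+   * A minimal sketch of building a value for this field (assumes the
+   * standard generated accessors for the selected_fields and
+   * row_restriction options; the column name and filter are hypothetical):
+   *
+   *   ReadSession.TableReadOptions options =
+   *       ReadSession.TableReadOptions.newBuilder()
+   *           .addSelectedFields("order_id")
+   *           .setRowRestriction("order_id > 0")
+   *           .build();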
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + boolean hasReadOptions(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions getReadOptions(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
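+   *
+   * A minimal read-back sketch (illustrative; {@code session} stands for a
+   * ReadSession returned by the service):
+   *
+   *   for (ReadStream stream : session.getStreamsList()) {
+   *     String streamName = stream.getName();  // hand off to ReadRows
+   *   }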
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getStreamsList(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStream getStreams(int index); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getStreamsCount(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getStreamsOrBuilderList(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   *
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getStreamsOrBuilder(int index); + + com.google.cloud.bigquery.storage.v1beta2.ReadSession.SchemaCase getSchemaCase(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java new file mode 100644 index 000000000000..8d8cbbbe2a4b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java @@ -0,0 +1,645 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Information about a single stream that gets data out of the storage system.
+ * Most of the information about `ReadStream` instances is aggregated, making
+ * `ReadStream` lightweight.
+ * 
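+ *
+ * A minimal build/serialize/parse round trip (illustrative; the name value
+ * is hypothetical, and toByteArray() is the standard protobuf serializer):
+ *
+ *   ReadStream rs =
+ *       ReadStream.newBuilder()
+ *           .setName("projects/p/locations/l/sessions/s/streams/s0")
+ *           .build();
+ *   // parseFrom may throw InvalidProtocolBufferException
+ *   ReadStream copy = ReadStream.parseFrom(rs.toByteArray());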
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadStream} + */ +public final class ReadStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadStream) + ReadStreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadStream.newBuilder() to construct. + private ReadStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadStream() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadStream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.class, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadStream other = + (com.google.cloud.bigquery.storage.v1beta2.ReadStream) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.ReadStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a single stream that gets data out of the storage system.
+   * Most of the information about `ReadStream` instances is aggregated, making
+   * `ReadStream` lightweight.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadStream) + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.class, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadStream.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream build() { + com.google.cloud.bigquery.storage.v1beta2.ReadStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadStream result = + new com.google.cloud.bigquery.storage.v1beta2.ReadStream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ReadStream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadStream other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadStream) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadStream(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java new file mode 100644 index 000000000000..32e330887b97 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java @@ -0,0 +1,257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class ReadStreamName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_SESSION_STREAM = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String session; + private final String stream; + + @Deprecated + protected ReadStreamName() { + project = null; + location = null; + session = null; + stream = null; + } + + private ReadStreamName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + session = Preconditions.checkNotNull(builder.getSession()); + stream = Preconditions.checkNotNull(builder.getStream()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getSession() { + return session; + } + + public String getStream() { + return stream; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ReadStreamName of(String project, String location, String session, String stream) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setSession(session) + .setStream(stream) + .build(); + } + + public static String format(String project, String location, String session, String stream) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setSession(session) + .setStream(stream) + .build() + .toString(); + } + + public static ReadStreamName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_SESSION_STREAM.validatedMatch( + formattedString, "ReadStreamName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("location"), + matchMap.get("session"), + matchMap.get("stream")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ReadStreamName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_SESSION_STREAM.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + 
fieldMapBuilder.put("location", location); + } + if (session != null) { + fieldMapBuilder.put("session", session); + } + if (stream != null) { + fieldMapBuilder.put("stream", stream); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_SESSION_STREAM.instantiate( + "project", project, "location", location, "session", session, "stream", stream); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ReadStreamName that = ((ReadStreamName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.session, that.session) + && Objects.equals(this.stream, that.stream); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(session); + h *= 1000003; + h ^= Objects.hashCode(stream); + return h; + } + + /** Builder for projects/{project}/locations/{location}/sessions/{session}/streams/{stream}. */ + public static class Builder { + private String project; + private String location; + private String session; + private String stream; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getSession() { + return session; + } + + public String getStream() { + return stream; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setSession(String session) { + this.session = session; + return this; + } + + public Builder setStream(String stream) { + this.stream = stream; + return this; + } + + private Builder(ReadStreamName readStreamName) { + this.project = readStreamName.project; + this.location = readStreamName.location; + this.session = readStreamName.session; + this.stream = readStreamName.stream; + } + + public ReadStreamName build() { + return new ReadStreamName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java new file mode 100644 index 000000000000..74e71d9450b2 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
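+   * For illustration only (all identifiers below are invented), a returned name can be
+   * decomposed with the generated ReadStreamName helper from this package:
+   *
+   *   ReadStreamName parsed = ReadStreamName.parse(readStream.getName());
+   *   String project = parsed.getProject();  // e.g. "my-project"
+   *   String stream = parsed.getStream();    // e.g. "stream-id"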
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java new file mode 100644 index 000000000000..b60d4b08b26c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java @@ -0,0 +1,781 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `SplitReadStream`.
+ * 
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest}
+ */
+public final class SplitReadStreamRequest extends com.google.protobuf.GeneratedMessageV3
+    implements
+    // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest)
+    SplitReadStreamRequestOrBuilder {
+  private static final long serialVersionUID = 0L;
+
+  // Use SplitReadStreamRequest.newBuilder() to construct.
+  private SplitReadStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  private SplitReadStreamRequest() {
+    name_ = "";
+  }
+
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new SplitReadStreamRequest();
+  }
+
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.bigquery.storage.v1beta2.StorageProto
+        .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.bigquery.storage.v1beta2.StorageProto
+        .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.class,
+            com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.Builder.class);
+  }
+
+  public static final int NAME_FIELD_NUMBER = 1;
+
+  @SuppressWarnings("serial")
+  private volatile java.lang.Object name_ = "";
+
+  /**
+   *
+   *
+   *
+   * Required. Name of the stream to split.
+   * 
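+   * As a sketch (all identifiers are placeholders), the full resource name of the
+   * stream can be produced with the generated ReadStreamName helper:
+   *
+   *   String name =
+   *       ReadStreamName.format("my-project", "us", "session-id", "stream-id");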
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FRACTION_FIELD_NUMBER = 2; + private double fraction_ = 0D; + + /** + * + * + *
+   * A value in the range (0.0, 1.0) that specifies the fractional point at
+   * which the original stream should be split. The actual split point is
+   * evaluated on pre-filtered rows, so if a filter is provided, then there is
+   * no guarantee that the division of the rows between the new child streams
+   * will be proportional to this fractional value. Additionally, because the
+   * server-side unit for assigning data is collections of rows, this fraction
+   * will always map to a data storage boundary on the server side.
+   * 
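+   * For example, a caller asking the server to split the stream roughly in half
+   * (the stream name below is a placeholder) might build:
+   *
+   *   SplitReadStreamRequest request =
+   *       SplitReadStreamRequest.newBuilder()
+   *           .setName("projects/p/locations/l/sessions/s/streams/st")
+   *           .setFraction(0.5)
+   *           .build();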
+ * + * double fraction = 2; + * + * @return The fraction. + */ + @java.lang.Override + public double getFraction() { + return fraction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (java.lang.Double.doubleToRawLongBits(fraction_) != 0) { + output.writeDouble(2, fraction_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (java.lang.Double.doubleToRawLongBits(fraction_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, fraction_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (java.lang.Double.doubleToLongBits(getFraction()) + != java.lang.Double.doubleToLongBits(other.getFraction())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + FRACTION_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getFraction())); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Request message for `SplitReadStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + fraction_ = 0D; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fraction_ = fraction_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder 
setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getFraction() != 0D) { + setFraction(other.getFraction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 17: + { + fraction_ = input.readDouble(); + bitField0_ |= 0x00000002; + break; + } // case 17 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private double fraction_; + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @return The fraction. + */ + @java.lang.Override + public double getFraction() { + return fraction_; + } + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
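+     * Note that this generated setter does not itself enforce the documented
+     * (0.0, 1.0) range. A defensive caller could check first (sketch only;
+     * `builder` is an assumed SplitReadStreamRequest.Builder):
+     *
+     *   com.google.common.base.Preconditions.checkArgument(
+     *       value > 0.0 && value < 1.0, "fraction must lie in (0.0, 1.0)");
+     *   builder.setFraction(value);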
+ * + * double fraction = 2; + * + * @param value The fraction to set. + * @return This builder for chaining. + */ + public Builder setFraction(double value) { + + fraction_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+     *
+     * <code>double fraction = 2;</code>
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearFraction() {
+      bitField0_ = (bitField0_ & ~0x00000002);
+      fraction_ = 0D;
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFields(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest)
+  private static final com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest
+      DEFAULT_INSTANCE;
+
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest();
+  }
+
+  public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest
+      getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<SplitReadStreamRequest> PARSER =
+      new com.google.protobuf.AbstractParser<SplitReadStreamRequest>() {
+        @java.lang.Override
+        public SplitReadStreamRequest parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          Builder builder = newBuilder();
+          try {
+            builder.mergeFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            throw e.setUnfinishedMessage(builder.buildPartial());
+          } catch (com.google.protobuf.UninitializedMessageException e) {
+            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+          } catch (java.io.IOException e) {
+            throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                .setUnfinishedMessage(builder.buildPartial());
+          }
+          return builder.buildPartial();
+        }
+      };
+
+  public static com.google.protobuf.Parser<SplitReadStreamRequest> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<SplitReadStreamRequest> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest
+      getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java
new file mode 100644
index 000000000000..a881cdb02fc5
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface SplitReadStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * A value in the range (0.0, 1.0) that specifies the fractional point at
+   * which the original stream should be split. The actual split point is
+   * evaluated on pre-filtered rows, so if a filter is provided, then there is
+   * no guarantee that the division of the rows between the new child streams
+   * will be proportional to this fractional value. Additionally, because the
+   * server-side unit for assigning data is collections of rows, this fraction
+   * will always map to a data storage boundary on the server side.
+   * 
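+   * For example, a fraction of 0.25 asks the server to route roughly the first
+   * quarter of the pre-filtered rows to the primary stream and the remaining
+   * three quarters to the remainder stream, subject to the storage-boundary
+   * rounding described above.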
+ * + * double fraction = 2; + * + * @return The fraction. + */ + double getFraction(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java new file mode 100644 index 000000000000..ba85c6dd6934 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java @@ -0,0 +1,1056 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse} */ +public final class SplitReadStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + SplitReadStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SplitReadStreamResponse.newBuilder() to construct. + private SplitReadStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.Builder.class); + } + + private int bitField0_; + public static final int PRIMARY_STREAM_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta2.ReadStream primaryStream_; + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
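+   * A minimal handling sketch (`response` is an assumed SplitReadStreamResponse):
+   *
+   *   if (response.hasPrimaryStream()) {
+   *     ReadStream primary = response.getPrimaryStream();
+   *     // consume the beginning portion of the original stream from primary
+   *   } else {
+   *     // the original stream could not be split any further
+   *   }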
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + @java.lang.Override + public boolean hasPrimaryStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getPrimaryStream() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getPrimaryStreamOrBuilder() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } + + public static final int REMAINDER_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.ReadStream remainderStream_; + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
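+   * Similarly (`response` is an assumed SplitReadStreamResponse):
+   *
+   *   if (response.hasRemainderStream()) {
+   *     ReadStream remainder = response.getRemainderStream();
+   *     // consume the tail of the original stream from remainder
+   *   }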
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + @java.lang.Override + public boolean hasRemainderStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getRemainderStream() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder + getRemainderStreamOrBuilder() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getPrimaryStream()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getRemainderStream()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPrimaryStream()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRemainderStream()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse other = + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) obj; + + if (hasPrimaryStream() != other.hasPrimaryStream()) return false; + if (hasPrimaryStream()) { + if (!getPrimaryStream().equals(other.getPrimaryStream())) return false; + } + if (hasRemainderStream() != other.hasRemainderStream()) return false; + if (hasRemainderStream()) { + if (!getRemainderStream().equals(other.getRemainderStream())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPrimaryStream()) { + hash = (37 * hash) + PRIMARY_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getPrimaryStream().hashCode(); + } + if (hasRemainderStream()) { + hash = (37 * hash) + REMAINDER_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getRemainderStream().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.ByteString data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse} */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getPrimaryStreamFieldBuilder(); + getRemainderStreamFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + primaryStream_ = null; + if (primaryStreamBuilder_ != null) { + primaryStreamBuilder_.dispose(); + primaryStreamBuilder_ = null; + } + remainderStream_ = null; + if (remainderStreamBuilder_ != null) { + remainderStreamBuilder_.dispose(); + remainderStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse build() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse result = + new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse result) { + int 
from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.primaryStream_ = + primaryStreamBuilder_ == null ? primaryStream_ : primaryStreamBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.remainderStream_ = + remainderStreamBuilder_ == null ? remainderStream_ : remainderStreamBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.getDefaultInstance()) + return this; + if (other.hasPrimaryStream()) { + mergePrimaryStream(other.getPrimaryStream()); + } + if (other.hasRemainderStream()) { + mergeRemainderStream(other.getRemainderStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getPrimaryStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getRemainderStreamFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta2.ReadStream primaryStream_; + private 
com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + primaryStreamBuilder_; + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + public boolean hasPrimaryStream() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getPrimaryStream() { + if (primaryStreamBuilder_ == null) { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } else { + return primaryStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder setPrimaryStream(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (primaryStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + primaryStream_ = value; + } else { + primaryStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder setPrimaryStream( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (primaryStreamBuilder_ == null) { + primaryStream_ = builderForValue.build(); + } else { + primaryStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder mergePrimaryStream(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (primaryStreamBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && primaryStream_ != null + && primaryStream_ + != com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()) { + getPrimaryStreamBuilder().mergeFrom(value); + } else { + primaryStream_ = value; + } + } else { + primaryStreamBuilder_.mergeFrom(value); + } + if (primaryStream_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder clearPrimaryStream() { + bitField0_ = (bitField0_ & ~0x00000001); + primaryStream_ = null; + if (primaryStreamBuilder_ != null) { + primaryStreamBuilder_.dispose(); + primaryStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder getPrimaryStreamBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getPrimaryStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder + getPrimaryStreamOrBuilder() { + if (primaryStreamBuilder_ != null) { + return primaryStreamBuilder_.getMessageOrBuilder(); + } else { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } + } + + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + getPrimaryStreamFieldBuilder() { + if (primaryStreamBuilder_ == null) { + primaryStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder>( + getPrimaryStream(), getParentForChildren(), isClean()); + primaryStream_ = null; + } + return primaryStreamBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadStream remainderStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + remainderStreamBuilder_; + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + public boolean hasRemainderStream() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getRemainderStream() { + if (remainderStreamBuilder_ == null) { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } else { + return remainderStreamBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder setRemainderStream(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (remainderStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + remainderStream_ = value; + } else { + remainderStreamBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder setRemainderStream( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (remainderStreamBuilder_ == null) { + remainderStream_ = builderForValue.build(); + } else { + remainderStreamBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder mergeRemainderStream( + com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (remainderStreamBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && remainderStream_ != null + && remainderStream_ + != com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()) { + getRemainderStreamBuilder().mergeFrom(value); + } else { + remainderStream_ = value; + } + } else { + remainderStreamBuilder_.mergeFrom(value); + } + if (remainderStream_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder clearRemainderStream() { + bitField0_ = (bitField0_ & ~0x00000002); + remainderStream_ = null; + if (remainderStreamBuilder_ != null) { + remainderStreamBuilder_.dispose(); + remainderStreamBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder + getRemainderStreamBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRemainderStreamFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder + getRemainderStreamOrBuilder() { + if (remainderStreamBuilder_ != null) { + return remainderStreamBuilder_.getMessageOrBuilder(); + } else { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } + } + + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + getRemainderStreamFieldBuilder() { + if (remainderStreamBuilder_ == null) { + remainderStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder>( + getRemainderStream(), getParentForChildren(), isClean()); + remainderStream_ = null; + } + return remainderStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java new file mode 100644 index 000000000000..de7ccf31a3dc --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java @@ 
-0,0 +1,109 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface SplitReadStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + boolean hasPrimaryStream(); + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStream getPrimaryStream(); + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getPrimaryStreamOrBuilder(); + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
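+   *
+   * <p>Illustrative sketch, not part of the generated surface: after a
+   * {@code SplitReadStream} call, an unset field means the original stream
+   * could not be split at the requested fraction. The {@code client},
+   * {@code request}, and {@code scheduleRead} names below are assumptions
+   * for illustration only.
+   *
+   * <pre>{@code
+   * SplitReadStreamResponse response = client.splitReadStream(request);
+   * if (response.hasPrimaryStream()) {
+   *   scheduleRead(response.getPrimaryStream());
+   * }
+   * if (response.hasRemainderStream()) {
+   *   // The split succeeded; the tail of the original stream is read separately.
+   *   scheduleRead(response.getRemainderStream());
+   * }
+   * }</pre>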
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + boolean hasRemainderStream(); + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStream getRemainderStream(); + + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getRemainderStreamOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java new file mode 100644 index 000000000000..033e2e463e4c --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java @@ -0,0 +1,1257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Structured custom BigQuery Storage error message. The error can be attached
+ * as error details in the returned rpc Status. In particular, the use of error
+ * codes allows more structured error handling, and reduces the need to evaluate
+ * unstructured error text strings.
+ * 
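+ *
+ * <p>Hedged usage sketch: a {@code StorageError} typically arrives packed as a
+ * {@code com.google.protobuf.Any} inside the details of a
+ * {@code google.rpc.Status}. The {@code status} variable below is an assumption
+ * for illustration; unpacking uses the standard protobuf {@code Any} API.
+ *
+ * <pre>{@code
+ * for (com.google.protobuf.Any detail : status.getDetailsList()) {
+ *   if (detail.is(StorageError.class)) {
+ *     // unpack may throw InvalidProtocolBufferException
+ *     StorageError error = detail.unpack(StorageError.class);
+ *     System.err.println(
+ *         error.getCode() + " on " + error.getEntity() + ": " + error.getErrorMessage());
+ *   }
+ * }
+ * }</pre>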
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StorageError} + */ +public final class StorageError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.StorageError) + StorageErrorOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StorageError.newBuilder() to construct. + private StorageError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StorageError() { + code_ = 0; + entity_ = ""; + errorMessage_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StorageError(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StorageError.class, + com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder.class); + } + + /** + * + * + *
+   * Error code for `StorageError`.
+   * 
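+   *
+   * <p>Illustrative note, not from the source proto: codes introduced by newer
+   * servers surface through {@code getCode()} as {@code UNRECOGNIZED}, so a
+   * {@code switch} over this enum should keep a default branch. The
+   * {@code error} variable is an assumed {@code StorageError} instance.
+   *
+   * <pre>{@code
+   * switch (error.getCode()) {
+   *   case STREAM_ALREADY_COMMITTED:
+   *   case STREAM_FINALIZED:
+   *     // Treated here as terminal for this write stream (illustrative policy).
+   *     break;
+   *   default:
+   *     // Covers UNRECOGNIZED values from newer server versions.
+   *     break;
+   * }
+   * }</pre>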
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode} + */ + public enum StorageErrorCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Default error.
+     * 
+ * + * STORAGE_ERROR_CODE_UNSPECIFIED = 0; + */ + STORAGE_ERROR_CODE_UNSPECIFIED(0), + /** + * + * + *
+     * Table is not found in the system.
+     * 
+ * + * TABLE_NOT_FOUND = 1; + */ + TABLE_NOT_FOUND(1), + /** + * + * + *
+     * Stream is already committed.
+     * 
+ * + * STREAM_ALREADY_COMMITTED = 2; + */ + STREAM_ALREADY_COMMITTED(2), + /** + * + * + *
+     * Stream is not found.
+     * 
+ * + * STREAM_NOT_FOUND = 3; + */ + STREAM_NOT_FOUND(3), + /** + * + * + *
+     * Invalid Stream type.
+     * For example, you try to commit a stream that is not pending.
+     * 
+ * + * INVALID_STREAM_TYPE = 4; + */ + INVALID_STREAM_TYPE(4), + /** + * + * + *
+     * Invalid Stream state.
+     * For example, you try to commit a stream that is not finalized or has
+     * been garbage collected.
+     * 
+ * + * INVALID_STREAM_STATE = 5; + */ + INVALID_STREAM_STATE(5), + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + STREAM_FINALIZED(6), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Default error.
+     * 
+ * + * STORAGE_ERROR_CODE_UNSPECIFIED = 0; + */ + public static final int STORAGE_ERROR_CODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Table is not found in the system.
+     * 
+ * + * TABLE_NOT_FOUND = 1; + */ + public static final int TABLE_NOT_FOUND_VALUE = 1; + + /** + * + * + *
+     * Stream is already committed.
+     * 
+ * + * STREAM_ALREADY_COMMITTED = 2; + */ + public static final int STREAM_ALREADY_COMMITTED_VALUE = 2; + + /** + * + * + *
+     * Stream is not found.
+     * 
+ * + * STREAM_NOT_FOUND = 3; + */ + public static final int STREAM_NOT_FOUND_VALUE = 3; + + /** + * + * + *
+     * Invalid Stream type.
+     * For example, you try to commit a stream that is not pending.
+     * 
+ * + * INVALID_STREAM_TYPE = 4; + */ + public static final int INVALID_STREAM_TYPE_VALUE = 4; + + /** + * + * + *
+     * Invalid Stream state.
+     * For example, you try to commit a stream that is not finalized or has
+     * been garbage collected.
+     * 
+ * + * INVALID_STREAM_STATE = 5; + */ + public static final int INVALID_STREAM_STATE_VALUE = 5; + + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + public static final int STREAM_FINALIZED_VALUE = 6; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static StorageErrorCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static StorageErrorCode forNumber(int value) { + switch (value) { + case 0: + return STORAGE_ERROR_CODE_UNSPECIFIED; + case 1: + return TABLE_NOT_FOUND; + case 2: + return STREAM_ALREADY_COMMITTED; + case 3: + return STREAM_NOT_FOUND; + case 4: + return INVALID_STREAM_TYPE; + case 5: + return INVALID_STREAM_STATE; + case 6: + return STREAM_FINALIZED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public StorageErrorCode findValueByNumber(int number) { + return StorageErrorCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageError.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final StorageErrorCode[] VALUES = values(); + + public static StorageErrorCode valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private StorageErrorCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode) + } + + public static final int CODE_FIELD_NUMBER = 1; + private int code_ = 0; + + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode getCode() { + com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode result = + com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode.forNumber(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode.UNRECOGNIZED + : result; + } + + public static final int ENTITY_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object entity_ = ""; + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The entity. + */ + @java.lang.Override + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } + } + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ERROR_MESSAGE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + @java.lang.Override + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } + } + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (code_ + != com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode + .STORAGE_ERROR_CODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(entity_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, entity_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, errorMessage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (code_ + != com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode + .STORAGE_ERROR_CODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(entity_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, entity_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(errorMessage_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, errorMessage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.StorageError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.StorageError other = + (com.google.cloud.bigquery.storage.v1beta2.StorageError) obj; + + if (code_ != other.code_) return false; + if (!getEntity().equals(other.getEntity())) return false; + if (!getErrorMessage().equals(other.getErrorMessage())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + code_; + hash = (37 * hash) + ENTITY_FIELD_NUMBER; + hash = (53 * hash) + getEntity().hashCode(); + hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getErrorMessage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + 
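+  // Hedged round-trip sketch (illustrative; it only uses the generated builder,
+  // serialization, and parse methods defined in this class):
+  //
+  //   StorageError error =
+  //       StorageError.newBuilder()
+  //           .setCode(StorageError.StorageErrorCode.TABLE_NOT_FOUND)
+  //           .setEntity("projects/p/datasets/d/tables/t") // hypothetical entity
+  //           .build();
+  //   // parseFrom may throw InvalidProtocolBufferException
+  //   StorageError reparsed = StorageError.parseFrom(error.toByteArray());
+  //   assert error.equals(reparsed);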
public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.StorageError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Structured custom BigQuery Storage error message. The error can be attached
+   * as error details in the returned rpc Status. In particular, the use of error
+   * codes allows more structured error handling, and reduces the need to evaluate
+   * unstructured error text strings.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StorageError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.StorageError) + com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StorageError.class, + com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.StorageError.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + code_ = 0; + entity_ = ""; + errorMessage_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError build() { + com.google.cloud.bigquery.storage.v1beta2.StorageError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.StorageError result = + new com.google.cloud.bigquery.storage.v1beta2.StorageError(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.StorageError result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.code_ = code_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.entity_ = entity_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.errorMessage_ = errorMessage_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int 
index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.StorageError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.StorageError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.StorageError other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.StorageError.getDefaultInstance()) + return this; + if (other.code_ != 0) { + setCodeValue(other.getCodeValue()); + } + if (!other.getEntity().isEmpty()) { + entity_ = other.entity_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getErrorMessage().isEmpty()) { + errorMessage_ = other.errorMessage_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + code_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + entity_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + errorMessage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int code_ = 0; + + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @param value The enum numeric value on the wire for code to set. + * @return This builder for chaining. + */ + public Builder setCodeValue(int value) { + code_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode getCode() { + com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode result = + com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode.forNumber(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @param value The code to set. + * @return This builder for chaining. + */ + public Builder setCode( + com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + code_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * BigQuery Storage-specific error code.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return This builder for chaining. + */ + public Builder clearCode() { + bitField0_ = (bitField0_ & ~0x00000001); + code_ = 0; + onChanged(); + return this; + } + + private java.lang.Object entity_ = ""; + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return The entity. + */ + public java.lang.String getEntity() { + java.lang.Object ref = entity_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entity_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + public com.google.protobuf.ByteString getEntityBytes() { + java.lang.Object ref = entity_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entity_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @param value The entity to set. + * @return This builder for chaining. + */ + public Builder setEntity(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entity_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @return This builder for chaining. + */ + public Builder clearEntity() { + entity_ = getDefaultInstance().getEntity(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Name of the failed entity.
+     * 
+ * + * string entity = 2; + * + * @param value The bytes for entity to set. + * @return This builder for chaining. + */ + public Builder setEntityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entity_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object errorMessage_ = ""; + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + public java.lang.String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + errorMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + public com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @param value The errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + errorMessage_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @return This builder for chaining. + */ + public Builder clearErrorMessage() { + errorMessage_ = getDefaultInstance().getErrorMessage(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Message that describes the error.
+     * 
+ * + * string error_message = 3; + * + * @param value The bytes for errorMessage to set. + * @return This builder for chaining. + */ + public Builder setErrorMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + errorMessage_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.StorageError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.StorageError) + private static final com.google.cloud.bigquery.storage.v1beta2.StorageError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.StorageError(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StorageError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StorageError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StorageError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java new file mode 100644 index 000000000000..8a0ae2807594 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface StorageErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.StorageError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return The enum numeric value on the wire for code. + */ + int getCodeValue(); + + /** + * + * + *
+   * BigQuery Storage-specific error code.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode code = 1; + * + * @return The code. + */ + com.google.cloud.bigquery.storage.v1beta2.StorageError.StorageErrorCode getCode(); + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The entity. + */ + java.lang.String getEntity(); + + /** + * + * + *
+   * Name of the failed entity.
+   * 
+ * + * string entity = 2; + * + * @return The bytes for entity. + */ + com.google.protobuf.ByteString getEntityBytes(); + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The errorMessage. + */ + java.lang.String getErrorMessage(); + + /** + * + * + *
+   * Message that describes the error.
+   * 
+ * + * string error_message = 3; + * + * @return The bytes for errorMessage. + */ + com.google.protobuf.ByteString getErrorMessageBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java new file mode 100644 index 000000000000..4c33c8d7470b --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java @@ -0,0 +1,507 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public final class StorageProto { + private StorageProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + static final 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n3google/cloud/bigquery/storage/v1beta2/" + + "storage.proto\022%google.cloud.bigquery.sto" + + "rage.v1beta2\032\034google/api/annotations.pro" + + "to\032\027google/api/client.proto\032\037google/api/" + + "field_behavior.proto\032\031google/api/resourc" + + "e.proto\0321google/cloud/bigquery/storage/v" + + "1beta2/arrow.proto\0320google/cloud/bigquer" + + "y/storage/v1beta2/avro.proto\0324google/clo" + + "ud/bigquery/storage/v1beta2/protobuf.pro" + + "to\0322google/cloud/bigquery/storage/v1beta" + + "2/stream.proto\0321google/cloud/bigquery/st" + + "orage/v1beta2/table.proto\032\037google/protob" + + "uf/timestamp.proto\032\036google/protobuf/wrap" + + "pers.proto\032\027google/rpc/status.proto\"\310\001\n\030" + + "CreateReadSessionRequest\022C\n\006parent\030\001 \001(\t" + + "B3\340A\002\372A-\n+cloudresourcemanager.googleapi" + + "s.com/Project\022M\n\014read_session\030\002 \001(\01322.go" + + "ogle.cloud.bigquery.storage.v1beta2.Read" + + "SessionB\003\340A\002\022\030\n\020max_stream_count\030\003 \001(\005\"i" + + "\n\017ReadRowsRequest\022F\n\013read_stream\030\001 \001(\tB1" + + "\340A\002\372A+\n)bigquerystorage.googleapis.com/R" + + "eadStream\022\016\n\006offset\030\002 \001(\003\")\n\rThrottleSta" + + "te\022\030\n\020throttle_percent\030\001 \001(\005\"\234\001\n\013StreamS" + + "tats\022M\n\010progress\030\002 \001(\0132;.google.cloud.bi" + + "gquery.storage.v1beta2.StreamStats.Progr" + + "ess\032>\n\010Progress\022\031\n\021at_response_start\030\001 \001" + + "(\001\022\027\n\017at_response_end\030\002 \001(\001\"\205\004\n\020ReadRows" + + "Response\022D\n\tavro_rows\030\003 \001(\0132/.google.clo" + + "ud.bigquery.storage.v1beta2.AvroRowsH\000\022U" + + "\n\022arrow_record_batch\030\004 \001(\01327.google.clou" + + 
"d.bigquery.storage.v1beta2.ArrowRecordBa" + + "tchH\000\022\021\n\trow_count\030\006 \001(\003\022A\n\005stats\030\002 \001(\0132" + + "2.google.cloud.bigquery.storage.v1beta2." + + "StreamStats\022L\n\016throttle_state\030\005 \001(\01324.go" + + "ogle.cloud.bigquery.storage.v1beta2.Thro" + + "ttleState\022M\n\013avro_schema\030\007 \001(\01321.google." + + "cloud.bigquery.storage.v1beta2.AvroSchem" + + "aB\003\340A\003H\001\022O\n\014arrow_schema\030\010 \001(\01322.google." + + "cloud.bigquery.storage.v1beta2.ArrowSche" + + "maB\003\340A\003H\001B\006\n\004rowsB\010\n\006schema\"k\n\026SplitRead" + + "StreamRequest\022?\n\004name\030\001 \001(\tB1\340A\002\372A+\n)big" + + "querystorage.googleapis.com/ReadStream\022\020" + + "\n\010fraction\030\002 \001(\001\"\261\001\n\027SplitReadStreamResp" + + "onse\022I\n\016primary_stream\030\001 \001(\01321.google.cl" + + "oud.bigquery.storage.v1beta2.ReadStream\022" + + "K\n\020remainder_stream\030\002 \001(\01321.google.cloud" + + ".bigquery.storage.v1beta2.ReadStream\"\240\001\n" + + "\030CreateWriteStreamRequest\0225\n\006parent\030\001 \001(" + + "\tB%\340A\002\372A\037\n\035bigquery.googleapis.com/Table" + + "\022M\n\014write_stream\030\002 \001(\01322.google.cloud.bi" + + "gquery.storage.v1beta2.WriteStreamB\003\340A\002\"" + + "\227\003\n\021AppendRowsRequest\022H\n\014write_stream\030\001 " + + "\001(\tB2\340A\002\372A,\n*bigquerystorage.googleapis." + + "com/WriteStream\022+\n\006offset\030\002 \001(\0132\033.google" + + ".protobuf.Int64Value\022X\n\nproto_rows\030\004 \001(\013" + + "2B.google.cloud.bigquery.storage.v1beta2" + + ".AppendRowsRequest.ProtoDataH\000\022\020\n\010trace_" + + "id\030\006 \001(\t\032\226\001\n\tProtoData\022I\n\rwriter_schema\030" + + "\001 \001(\01322.google.cloud.bigquery.storage.v1" + + "beta2.ProtoSchema\022>\n\004rows\030\002 \001(\01320.google" + + ".cloud.bigquery.storage.v1beta2.ProtoRow" + + "sB\006\n\004rows\"\257\002\n\022AppendRowsResponse\022_\n\rappe" + + "nd_result\030\001 \001(\0132F.google.cloud.bigquery." + + "storage.v1beta2.AppendRowsResponse.Appen" + + "dResultH\000\022#\n\005error\030\002 \001(\0132\022.google.rpc.St" + + "atusH\000\022J\n\016updated_schema\030\003 \001(\01322.google." + + "cloud.bigquery.storage.v1beta2.TableSche" + + "ma\032;\n\014AppendResult\022+\n\006offset\030\001 \001(\0132\033.goo" + + "gle.protobuf.Int64ValueB\n\n\010response\"Y\n\025G" + + "etWriteStreamRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372" + + "A,\n*bigquerystorage.googleapis.com/Write" + + "Stream\"Q\n\036BatchCommitWriteStreamsRequest" + + "\022\023\n\006parent\030\001 \001(\tB\003\340A\002\022\032\n\rwrite_streams\030\002" + + " \003(\tB\003\340A\002\"\236\001\n\037BatchCommitWriteStreamsRes" + + "ponse\022/\n\013commit_time\030\001 \001(\0132\032.google.prot" + + "obuf.Timestamp\022J\n\rstream_errors\030\002 \003(\01323." + + "google.cloud.bigquery.storage.v1beta2.St" + + "orageError\"^\n\032FinalizeWriteStreamRequest" + + "\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorage." 
+ + "googleapis.com/WriteStream\"0\n\033FinalizeWr" + + "iteStreamResponse\022\021\n\trow_count\030\001 \001(\003\"\211\001\n" + + "\020FlushRowsRequest\022H\n\014write_stream\030\001 \001(\tB" + + "2\340A\002\372A,\n*bigquerystorage.googleapis.com/" + + "WriteStream\022+\n\006offset\030\002 \001(\0132\033.google.pro" + + "tobuf.Int64Value\"#\n\021FlushRowsResponse\022\016\n" + + "\006offset\030\001 \001(\003\"\324\002\n\014StorageError\022R\n\004code\030\001" + + " \001(\0162D.google.cloud.bigquery.storage.v1b" + + "eta2.StorageError.StorageErrorCode\022\016\n\006en" + + "tity\030\002 \001(\t\022\025\n\rerror_message\030\003 \001(\t\"\310\001\n\020St" + + "orageErrorCode\022\"\n\036STORAGE_ERROR_CODE_UNS" + + "PECIFIED\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREA" + + "M_ALREADY_COMMITTED\020\002\022\024\n\020STREAM_NOT_FOUN" + + "D\020\003\022\027\n\023INVALID_STREAM_TYPE\020\004\022\030\n\024INVALID_" + + "STREAM_STATE\020\005\022\024\n\020STREAM_FINALIZED\020\0062\277\006\n" + + "\014BigQueryRead\022\370\001\n\021CreateReadSession\022?.go" + + "ogle.cloud.bigquery.storage.v1beta2.Crea" + + "teReadSessionRequest\0322.google.cloud.bigq" + + "uery.storage.v1beta2.ReadSession\"n\332A$par" + + "ent,read_session,max_stream_count\202\323\344\223\002A\"" + + ".google.cloud.bigquery.sto" + + "rage.v1beta2.SplitReadStreamResponse\"C\202\323" + + "\344\223\002=\022;/v1beta2/{name=projects/*/location" + + "s/*/sessions/*/streams/*}\032{\312A\036bigqueryst" + + "orage.googleapis.com\322AWhttps://www.googl" + + "eapis.com/auth/bigquery,https://www.goog" + + "leapis.com/auth/cloud-platform2\253\014\n\rBigQu" + + "eryWrite\022\351\001\n\021CreateWriteStream\022?.google." + + "cloud.bigquery.storage.v1beta2.CreateWri" + + "teStreamRequest\0322.google.cloud.bigquery." + + "storage.v1beta2.WriteStream\"_\210\002\001\332A\023paren" + + "t,write_stream\202\323\344\223\002@\"0/v1beta2/{parent=p" + + "rojects/*/datasets/*/tables/*}:\014write_st" + + "ream\022\344\001\n\nAppendRows\0228.google.cloud.bigqu" + + "ery.storage.v1beta2.AppendRowsRequest\0329." + + "google.cloud.bigquery.storage.v1beta2.Ap" + + "pendRowsResponse\"]\210\002\001\332A\014write_stream\202\323\344\223" + + "\002E\"@/v1beta2/{write_stream=projects/*/da" + + "tasets/*/tables/*/streams/*}:\001*(\0010\001\022\321\001\n\016" + + "GetWriteStream\022<.google.cloud.bigquery.s" + + "torage.v1beta2.GetWriteStreamRequest\0322.g" + + "oogle.cloud.bigquery.storage.v1beta2.Wri" + + "teStream\"M\210\002\001\332A\004name\202\323\344\223\002=\"8/v1beta2/{na" + + "me=projects/*/datasets/*/tables/*/stream" + + "s/*}:\001*\022\353\001\n\023FinalizeWriteStream\022A.google" + + ".cloud.bigquery.storage.v1beta2.Finalize" + + "WriteStreamRequest\032B.google.cloud.bigque" + + "ry.storage.v1beta2.FinalizeWriteStreamRe" + + "sponse\"M\210\002\001\332A\004name\202\323\344\223\002=\"8/v1beta2/{name" + + "=projects/*/datasets/*/tables/*/streams/" + + "*}:\001*\022\356\001\n\027BatchCommitWriteStreams\022E.goog" + + "le.cloud.bigquery.storage.v1beta2.BatchC" + + "ommitWriteStreamsRequest\032F.google.cloud." + + "bigquery.storage.v1beta2.BatchCommitWrit" + + "eStreamsResponse\"D\210\002\001\332A\006parent\202\323\344\223\0022\0220/v" + + "1beta2/{parent=projects/*/datasets/*/tab" + + "les/*}\022\335\001\n\tFlushRows\0227.google.cloud.bigq" + + "uery.storage.v1beta2.FlushRowsRequest\0328." 
+ + "google.cloud.bigquery.storage.v1beta2.Fl" + + "ushRowsResponse\"]\210\002\001\332A\014write_stream\202\323\344\223\002" + + "E\"@/v1beta2/{write_stream=projects/*/dat" + + "asets/*/tables/*/streams/*}:\001*\032\263\001\210\002\001\312A\036b" + + "igquerystorage.googleapis.com\322A\213\001https:/" + + "/www.googleapis.com/auth/bigquery,https:" + + "//www.googleapis.com/auth/bigquery.inser" + + "tdata,https://www.googleapis.com/auth/cl" + + "oud-platformB\200\001\n)com.google.cloud.bigque" + + "ry.storage.v1beta2B\014StorageProtoP\001ZCclou" + + "d.google.com/go/bigquery/storage/apiv1be" + + "ta2/storagepb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.StreamProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.TableProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.protobuf.WrappersProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor, + new java.lang.String[] { + "Parent", "ReadSession", "MaxStreamCount", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor, + new java.lang.String[] { + "ReadStream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor, + new java.lang.String[] { + "ThrottlePercent", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor, + new java.lang.String[] { + "Progress", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor + .getNestedTypes() + .get(0); + 
internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor, + new java.lang.String[] { + "AtResponseStart", "AtResponseEnd", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor, + new java.lang.String[] { + "AvroRows", + "ArrowRecordBatch", + "RowCount", + "Stats", + "ThrottleState", + "AvroSchema", + "ArrowSchema", + "Rows", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor, + new java.lang.String[] { + "Name", "Fraction", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor, + new java.lang.String[] { + "PrimaryStream", "RemainderStream", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_CreateWriteStreamRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStream", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", "Offset", "ProtoRows", "TraceId", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsRequest_ProtoData_descriptor, + new java.lang.String[] { + "WriterSchema", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor, + new java.lang.String[] { + "AppendResult", "Error", "UpdatedSchema", "Response", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AppendRowsResponse_AppendResult_descriptor, + new java.lang.String[] { + "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_GetWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStreams", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_BatchCommitWriteStreamsResponse_descriptor, + new java.lang.String[] { + "CommitTime", "StreamErrors", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_FinalizeWriteStreamResponse_descriptor, + new java.lang.String[] { + "RowCount", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_descriptor = + 
getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_FlushRowsResponse_descriptor, + new java.lang.String[] { + "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_StorageError_descriptor, + new java.lang.String[] { + "Code", "Entity", "ErrorMessage", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.ProtoBufProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.StreamProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.TableProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.WrappersProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java new file mode 100644 index 000000000000..802689d3f20f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java @@ -0,0 +1,203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public final class StreamProto { + private StreamProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "2google/cloud/bigquery/storage/v1beta2/stream.proto\022%google.cloud.bigquery.stor" + + "age.v1beta2\032\037google/api/field_behavior.p" + + "roto\032\031google/api/resource.proto\0321google/" + + "cloud/bigquery/storage/v1beta2/arrow.proto\0320google/cloud/bigquery/storage/v1beta" + + "2/avro.proto\0321google/cloud/bigquery/stor" + + "age/v1beta2/table.proto\032\037google/protobuf/timestamp.proto\"\362\007\n" + + "\013ReadSession\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003\0224\n" + + "\013expire_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022K\n" + + "\013data_format\030\003" + + " \001(\01621.google.cloud.bigquery.storage.v1beta2.DataFormatB\003\340A\005\022M\n" + + "\013avro_schema\030\004 " + + "\001(\01321.google.cloud.bigquery.storage.v1beta2.AvroSchemaB\003\340A\003H\000\022O\n" + + "\014arrow_schema\030\005 " + + "\001(\01322.google.cloud.bigquery.storage.v1beta2.ArrowSchemaB\003\340A\003H\000\0224\n" + + "\005table\030\006 \001(\tB%\340A\005\372A\037\n" + + "\035bigquery.googleapis.com/Table\022_\n" + + "\017table_modifiers\030\007 \001(\0132A.google.cloud.big" + + "query.storage.v1beta2.ReadSession.TableModifiersB\003\340A\001\022^\n" + + "\014read_options\030\010 \001(\0132C.go" + + 
"ogle.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsB\003\340A\001\022G\n" + + "\007streams\030\n" + + " \003(\01321.google.cloud.bigquery.storage.v1beta2.ReadStreamB\003\340A\003\032C\n" + + "\016TableModifiers\0221\n\r" + + "snapshot_time\030\001 \001(\0132\032.google.protobuf.Timestamp\032\260\001\n" + + "\020TableReadOptions\022\027\n" + + "\017selected_fields\030\001 \003(\t\022\027\n" + + "\017row_restriction\030\002 \001(\t\022j\n" + + "\033arrow_serialization_options\030\003 \001(\0132" + + "@.google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptionsB\003\340A\001:k\352Ah\n" + + "*bigquerystorage.googleapis.com/ReadSession\022" + + ":projects/{project}/locations/{location}/sessions/{session}B\010\n" + + "\006schema\"\234\001\n\n" + + "ReadStream\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003:{\352Ax\n" + + ")bigquerystorage.googleapis.com/ReadStream\022Kproject" + + "s/{project}/locations/{location}/sessions/{session}/streams/{stream}\"\347\003\n" + + "\013WriteStream\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003\022J\n" + + "\004type\030\002 \001(\01627." + + "google.cloud.bigquery.storage.v1beta2.WriteStream.TypeB\003\340A\005\0224\n" + + "\013create_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013commit_time\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022M\n" + + "\014table_schema\030\005 \001(\01322.google.c" + + "loud.bigquery.storage.v1beta2.TableSchemaB\003\340A\003\"F\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\r\n" + + "\tCOMMITTED\020\001\022\013\n" + + "\007PENDING\020\002\022\014\n" + + "\010BUFFERED\020\003:v\352As\n" + + "*bigquerystorage.googleapis.com/WriteStream\022Eprojects/{project}/datasets/{da" + + "taset}/tables/{table}/streams/{stream}*>\n\n" + + "DataFormat\022\033\n" + + "\027DATA_FORMAT_UNSPECIFIED\020\000\022\010\n" + + "\004AVRO\020\001\022\t\n" + + "\005ARROW\020\002B\327\001\n" + + ")com.google.cloud.bigquery.storage.v1beta2B\013StreamProt" + + "oP\001ZCcloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb\352AU\n" + + "\035bigquery.googleapis.com/Table\0224projects/{pr" + + "oject}/datasets/{dataset}/tables/{table}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.TableProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor, + new java.lang.String[] { + "Name", + "ExpireTime", + "DataFormat", + "AvroSchema", + "ArrowSchema", + "Table", + "TableModifiers", + "ReadOptions", + "Streams", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor + .getNestedTypes() + .get(0); + 
internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor, + new java.lang.String[] { + "SnapshotTime", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor + .getNestedTypes() + .get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor, + new java.lang.String[] { + "SelectedFields", "RowRestriction", "ArrowSerializationOptions", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_descriptor, + new java.lang.String[] { + "Name", "Type", "CreateTime", "CommitTime", "TableSchema", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.TableProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java new file mode 100644 index 000000000000..da9c2978e25f --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java @@ -0,0 +1,1460 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Estimated stream statistics for a given Stream.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats} + */ +public final class StreamStats extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.StreamStats) + StreamStatsOrBuilder { + private static final long serialVersionUID = 0L; + + // Use StreamStats.newBuilder() to construct. + private StreamStats(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamStats() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamStats(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder.class); + } + + public interface ProgressOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by
+     * the server so far, not including the rows in the current response
+     * message.
+     *
+     * This value, along with `at_response_end`, can be used to interpolate
+     * the progress made as the rows in the message are being processed using
+     * the following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     *
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response may not necessarily be equal to the
+     * `at_response_start` value of the current response.
+     * 
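+     *
+     * A minimal sketch of that interpolation (the locals `progress`,
+     * `rowsProcessedFromResponse`, and `rowsInResponse` are illustrative
+     * names, not part of this API):
+     *
+     *   // Interpolated fraction of the stream processed so far; assumes
+     *   // rowsInResponse is nonzero and progress is this message.
+     *   double fraction =
+     *       progress.getAtResponseStart()
+     *           + (progress.getAtResponseEnd() - progress.getAtResponseStart())
+     *               * ((double) rowsProcessedFromResponse / rowsInResponse);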
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + double getAtResponseStart(); + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the
+     * rows in the current response.
+     * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + double getAtResponseEnd(); + } + + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats.Progress} */ + public static final class Progress extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + ProgressOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Progress.newBuilder() to construct. + private Progress(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Progress() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Progress(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder.class); + } + + public static final int AT_RESPONSE_START_FIELD_NUMBER = 1; + private double atResponseStart_ = 0D; + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by
+     * the server so far, not including the rows in the current response
+     * message.
+     *
+     * This value, along with `at_response_end`, can be used to interpolate
+     * the progress made as the rows in the message are being processed using
+     * the following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     *
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response may not necessarily be equal to the
+     * `at_response_start` value of the current response.
+     * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + @java.lang.Override + public double getAtResponseStart() { + return atResponseStart_; + } + + public static final int AT_RESPONSE_END_FIELD_NUMBER = 2; + private double atResponseEnd_ = 0D; + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the
+     * rows in the current response.
+     * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + @java.lang.Override + public double getAtResponseEnd() { + return atResponseEnd_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (java.lang.Double.doubleToRawLongBits(atResponseStart_) != 0) { + output.writeDouble(1, atResponseStart_); + } + if (java.lang.Double.doubleToRawLongBits(atResponseEnd_) != 0) { + output.writeDouble(2, atResponseEnd_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (java.lang.Double.doubleToRawLongBits(atResponseStart_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(1, atResponseStart_); + } + if (java.lang.Double.doubleToRawLongBits(atResponseEnd_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, atResponseEnd_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress other = + (com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) obj; + + if (java.lang.Double.doubleToLongBits(getAtResponseStart()) + != java.lang.Double.doubleToLongBits(other.getAtResponseStart())) return false; + if (java.lang.Double.doubleToLongBits(getAtResponseEnd()) + != java.lang.Double.doubleToLongBits(other.getAtResponseEnd())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + AT_RESPONSE_START_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getAtResponseStart())); + hash = (37 * hash) + AT_RESPONSE_END_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getAtResponseEnd())); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+
+    /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats.Progress} */
+    public static final class Builder
+        extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+        implements
+        // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress)
+        com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+        return com.google.cloud.bigquery.storage.v1beta2.StorageProto
+            .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor;
+      }
+
+      @java.lang.Override
+      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return com.google.cloud.bigquery.storage.v1beta2.StorageProto
+            .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.class,
+                com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder.class);
+      }
+
+      // Construct using com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.newBuilder()
+      private Builder() {}
+
+      private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+      }
+
+      @java.lang.Override
+      public Builder clear() {
+        super.clear();
+        bitField0_ = 0;
+        atResponseStart_ = 0D;
+        atResponseEnd_ = 0D;
+        return this;
+      }
+
+      @java.lang.Override
+      public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+        return com.google.cloud.bigquery.storage.v1beta2.StorageProto
+            .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor;
+      }
+
+      @java.lang.Override
+      public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress
+          getDefaultInstanceForType() {
+        return com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance();
+      }
+
+      @java.lang.Override
+      public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress build() {
+        com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      @java.lang.Override
+      public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress buildPartial() {
+        com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress result =
+            new com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress(this);
+        if (bitField0_ != 0) {
+          buildPartial0(result);
+        }
+        onBuilt();
+        return result;
+      }
+
+      private void buildPartial0(
+          com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress result) {
+        int from_bitField0_ = bitField0_;
+        if (((from_bitField0_ & 0x00000001) != 0)) {
+          result.atResponseStart_ = atResponseStart_;
+        }
+        if (((from_bitField0_ & 0x00000002) != 0)) {
+          result.atResponseEnd_ = atResponseEnd_;
+        }
+      }
+
+      @java.lang.Override
+      public Builder clone() {
+        return super.clone();
+      }
+
+      @java.lang.Override
+      public Builder setField(
+          com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+        return super.setField(field, value);
+      }
+
+      @java.lang.Override
+      public Builder
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance()) + return this; + if (other.getAtResponseStart() != 0D) { + setAtResponseStart(other.getAtResponseStart()); + } + if (other.getAtResponseEnd() != 0D) { + setAtResponseEnd(other.getAtResponseEnd()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 9: + { + atResponseStart_ = input.readDouble(); + bitField0_ |= 0x00000001; + break; + } // case 9 + case 17: + { + atResponseEnd_ = input.readDouble(); + bitField0_ |= 0x00000002; + break; + } // case 17 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private double atResponseStart_; + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + @java.lang.Override + public double getAtResponseStart() { + return atResponseStart_; + } + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @param value The atResponseStart to set. + * @return This builder for chaining. + */ + public Builder setAtResponseStart(double value) { + + atResponseStart_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       *
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       *
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseStart() { + bitField0_ = (bitField0_ & ~0x00000001); + atResponseStart_ = 0D; + onChanged(); + return this; + } + + private double atResponseEnd_; + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + @java.lang.Override + public double getAtResponseEnd() { + return atResponseEnd_; + } + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @param value The atResponseEnd to set. + * @return This builder for chaining. + */ + public Builder setAtResponseEnd(double value) { + + atResponseEnd_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+       *
+       * double at_response_end = 2;
+       *
+       * @return This builder for chaining.
+       */
+      public Builder clearAtResponseEnd() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        atResponseEnd_ = 0D;
+        onChanged();
+        return this;
+      }
+
+      @java.lang.Override
+      public final Builder setUnknownFields(
+          final com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      @java.lang.Override
+      public final Builder mergeUnknownFields(
+          final com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+      // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress)
+    }
+
+    // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress)
+    private static final com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress
+        DEFAULT_INSTANCE;
+
+    static {
+      DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress();
+    }
+
+    public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress
+        getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    private static final com.google.protobuf.Parser<Progress> PARSER =
+        new com.google.protobuf.AbstractParser<Progress>() {
+          @java.lang.Override
+          public Progress parsePartialFrom(
+              com.google.protobuf.CodedInputStream input,
+              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+              throws com.google.protobuf.InvalidProtocolBufferException {
+            Builder builder = newBuilder();
+            try {
+              builder.mergeFrom(input, extensionRegistry);
+            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+              throw e.setUnfinishedMessage(builder.buildPartial());
+            } catch (com.google.protobuf.UninitializedMessageException e) {
+              throw e.asInvalidProtocolBufferException()
+                  .setUnfinishedMessage(builder.buildPartial());
+            } catch (java.io.IOException e) {
+              throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                  .setUnfinishedMessage(builder.buildPartial());
+            }
+            return builder.buildPartial();
+          }
+        };
+
+    public static com.google.protobuf.Parser<Progress> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<Progress> getParserForType() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress
+        getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+  }
+
+  private int bitField0_;
+  public static final int PROGRESS_FIELD_NUMBER = 2;
+  private com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress_;
+
+  /**
+   *
+   *
+   *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress getProgress() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder + getProgressOrBuilder() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getProgress()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.StreamStats other = + (com.google.cloud.bigquery.storage.v1beta2.StreamStats) obj; + + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + byte[] 
data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.StreamStats prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Estimated stream statistics for a given Stream.
+   * 
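// ---- Editor's sketch (not part of the generated patch) ---------------------
// The Builder defined just below is normally only driven by hand in tests,
// since StreamStats usually arrives from the server inside a ReadRowsResponse.
// The Progress setters used here (setAtResponseStart/setAtResponseEnd) are an
// assumption about the v1beta2 Progress message, not shown in this hunk.
StreamStats stats =
    StreamStats.newBuilder()
        .setProgress(
            StreamStats.Progress.newBuilder()
                .setAtResponseStart(0.25) // assumed Progress field
                .setAtResponseEnd(0.50))  // assumed Progress field
        .build();
boolean present = stats.hasProgress(); // true: presence tracked via bitField0_
// ----------------------------------------------------------------------------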
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.StreamStats) + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.StreamStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getProgressFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats build() { + com.google.cloud.bigquery.storage.v1beta2.StreamStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.StreamStats result = + new com.google.cloud.bigquery.storage.v1beta2.StreamStats(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.StreamStats result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.progress_ = progressBuilder_ == null ? 
progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.StreamStats) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.StreamStats other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance()) + return this; + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + input.readMessage(getProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder setProgress( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder setProgress( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder mergeProgress( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && progress_ != null + && progress_ + != com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress + .getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000001); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder + getProgressBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder + getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder> + getProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.StreamStats) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.StreamStats) + private static final com.google.cloud.bigquery.storage.v1beta2.StreamStats DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.StreamStats(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java new file mode 100644 index 000000000000..25b70e48f9cb --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface StreamStatsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.StreamStats) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Represents the progress of the current stream.
+   *
+   *
+   * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2;
+   *
+   * @return Whether the progress field is set.
+   */
+  boolean hasProgress();
+
+  /**
+   *
+   *
+   *
+   * Represents the progress of the current stream.
+   *
+   *
+   * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2;
+   *
+   * @return The progress.
+   */
+  com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress getProgress();
+
+  /**
+   *
+   *
+   *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder getProgressOrBuilder(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java new file mode 100644 index 000000000000..dc3f105c00ca --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java @@ -0,0 +1,2353 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * A field in TableSchema
+ * 
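// ---- Editor's sketch (not part of the generated patch) ---------------------
// Typical assembly of the message defined below, using only builder methods
// that appear in this file; the field name and values are illustrative.
TableFieldSchema ageField =
    TableFieldSchema.newBuilder()
        .setName("age")                          // field 1, REQUIRED
        .setType(TableFieldSchema.Type.INT64)    // field 2, REQUIRED
        .setMode(TableFieldSchema.Mode.NULLABLE) // field 3; NULLABLE is the default
        .setDescription("Age in years")          // field 6, at most 1,024 characters
        .build();
// ----------------------------------------------------------------------------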
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.TableFieldSchema} + */ +public final class TableFieldSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.TableFieldSchema) + TableFieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TableFieldSchema.newBuilder() to construct. + private TableFieldSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableFieldSchema() { + name_ = ""; + type_ = 0; + mode_ = 0; + fields_ = java.util.Collections.emptyList(); + description_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableFieldSchema(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.class, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder.class); + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type} */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Illegal value
+     *
+     *
+     * TYPE_UNSPECIFIED = 0;
+     */
+    TYPE_UNSPECIFIED(0),
+    /**
+     *
+     *
+     *
+     * 64K, UTF8
+     *
+     *
+     * STRING = 1;
+     */
+    STRING(1),
+    /**
+     *
+     *
+     *
+     * 64-bit signed
+     *
+     *
+     * INT64 = 2;
+     */
+    INT64(2),
+    /**
+     *
+     *
+     *
+     * 64-bit IEEE floating point
+     *
+     *
+     * DOUBLE = 3;
+     */
+    DOUBLE(3),
+    /**
+     *
+     *
+     *
+     * Aggregate type
+     *
+     *
+     * STRUCT = 4;
+     */
+    STRUCT(4),
+    /**
+     *
+     *
+     *
+     * 64K, Binary
+     *
+     *
+     * BYTES = 5;
+     */
+    BYTES(5),
+    /**
+     *
+     *
+     *
+     * 2-valued
+     *
+     *
+     * BOOL = 6;
+     */
+    BOOL(6),
+    /**
+     *
+     *
+     *
+     * 64-bit signed usec since UTC epoch
+     *
+     *
+     * TIMESTAMP = 7;
+     */
+    TIMESTAMP(7),
+    /**
+     *
+     *
+     *
+     * Civil date - Year, Month, Day
+     *
+     *
+     * DATE = 8;
+     */
+    DATE(8),
+    /**
+     *
+     *
+     *
+     * Civil time - Hour, Minute, Second, Microseconds
+     *
+     *
+     * TIME = 9;
+     */
+    TIME(9),
+    /**
+     *
+     *
+     *
+     * Combination of civil date and civil time
+     *
+     *
+     * DATETIME = 10;
+     */
+    DATETIME(10),
+    /**
+     *
+     *
+     *
+     * Geography object
+     *
+     *
+     * GEOGRAPHY = 11;
+     */
+    GEOGRAPHY(11),
+    /**
+     *
+     *
+     *
+     * Numeric value
+     *
+     *
+     * NUMERIC = 12;
+     */
+    NUMERIC(12),
+    /**
+     *
+     *
+     *
+     * BigNumeric value
+     *
+     *
+     * BIGNUMERIC = 13;
+     */
+    BIGNUMERIC(13),
+    /**
+     *
+     *
+     *
+     * Interval
+     *
+     *
+     * INTERVAL = 14;
+     */
+    INTERVAL(14),
+    /**
+     *
+     *
+     *
+     * JSON, String
+     *
+     *
+     * JSON = 15;
+     */
+    JSON(15),
+    UNRECOGNIZED(-1),
+    ;
+
+    /**
+     *
+     *
+     *
+     * Illegal value
+     *
+     *
+     * TYPE_UNSPECIFIED = 0;
+     */
+    public static final int TYPE_UNSPECIFIED_VALUE = 0;
+
+    /**
+     *
+     *
+     *
+     * 64K, UTF8
+     *
+     *
+     * STRING = 1;
+     */
+    public static final int STRING_VALUE = 1;
+
+    /**
+     *
+     *
+     *
+     * 64-bit signed
+     *
+     *
+     * INT64 = 2;
+     */
+    public static final int INT64_VALUE = 2;
+
+    /**
+     *
+     *
+     *
+     * 64-bit IEEE floating point
+     *
+     *
+     * DOUBLE = 3;
+     */
+    public static final int DOUBLE_VALUE = 3;
+
+    /**
+     *
+     *
+     *
+     * Aggregate type
+     *
+     *
+     * STRUCT = 4;
+     */
+    public static final int STRUCT_VALUE = 4;
+
+    /**
+     *
+     *
+     *
+     * 64K, Binary
+     *
+     *
+     * BYTES = 5;
+     */
+    public static final int BYTES_VALUE = 5;
+
+    /**
+     *
+     *
+     *
+     * 2-valued
+     *
+     *
+     * BOOL = 6;
+     */
+    public static final int BOOL_VALUE = 6;
+
+    /**
+     *
+     *
+     *
+     * 64-bit signed usec since UTC epoch
+     *
+     *
+     * TIMESTAMP = 7;
+     */
+    public static final int TIMESTAMP_VALUE = 7;
+
+    /**
+     *
+     *
+     *
+     * Civil date - Year, Month, Day
+     *
+     *
+     * DATE = 8;
+     */
+    public static final int DATE_VALUE = 8;
+
+    /**
+     *
+     *
+     *
+     * Civil time - Hour, Minute, Second, Microseconds
+     *
+     *
+     * TIME = 9;
+     */
+    public static final int TIME_VALUE = 9;
+
+    /**
+     *
+     *
+     *
+     * Combination of civil date and civil time
+     *
+     *
+     * DATETIME = 10;
+     */
+    public static final int DATETIME_VALUE = 10;
+
+    /**
+     *
+     *
+     *
+     * Geography object
+     *
+     *
+     * GEOGRAPHY = 11;
+     */
+    public static final int GEOGRAPHY_VALUE = 11;
+
+    /**
+     *
+     *
+     *
+     * Numeric value
+     *
+     *
+     * NUMERIC = 12;
+     */
+    public static final int NUMERIC_VALUE = 12;
+
+    /**
+     *
+     *
+     *
+     * BigNumeric value
+     *
+     *
+     * BIGNUMERIC = 13;
+     */
+    public static final int BIGNUMERIC_VALUE = 13;
+
+    /**
+     *
+     *
+     *
+     * Interval
+     *
+     *
+     * INTERVAL = 14;
+     */
+    public static final int INTERVAL_VALUE = 14;
+
+    /**
+     *
+     *
+     *
+     * JSON, String
+     *
+ * + * JSON = 15; + */ + public static final int JSON_VALUE = 15; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return STRING; + case 2: + return INT64; + case 3: + return DOUBLE; + case 4: + return STRUCT; + case 5: + return BYTES; + case 6: + return BOOL; + case 7: + return TIMESTAMP; + case 8: + return DATE; + case 9: + return TIME; + case 10: + return DATETIME; + case 11: + return GEOGRAPHY; + case 12: + return NUMERIC; + case 13: + return BIGNUMERIC; + case 14: + return INTERVAL; + case 15: + return JSON; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type) + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode} */ + public enum Mode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Illegal value
+     *
+     *
+     * MODE_UNSPECIFIED = 0;
+     */
+    MODE_UNSPECIFIED(0),
+    /** NULLABLE = 1; */
+    NULLABLE(1),
+    /** REQUIRED = 2; */
+    REQUIRED(2),
+    /** REPEATED = 3; */
+    REPEATED(3),
+    UNRECOGNIZED(-1),
+    ;
+
+    /**
+     *
+     *
+     *
+     * Illegal value
+     * 
+ * + * MODE_UNSPECIFIED = 0; + */ + public static final int MODE_UNSPECIFIED_VALUE = 0; + + /** NULLABLE = 1; */ + public static final int NULLABLE_VALUE = 1; + + /** REQUIRED = 2; */ + public static final int REQUIRED_VALUE = 2; + + /** REPEATED = 3; */ + public static final int REPEATED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Mode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Mode forNumber(int value) { + switch (value) { + case 0: + return MODE_UNSPECIFIED; + case 1: + return NULLABLE; + case 2: + return REQUIRED; + case 3: + return REPEATED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Mode findValueByNumber(int number) { + return Mode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final Mode[] VALUES = values(); + + public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Mode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode) + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
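// ---- Editor's sketch (not part of the generated patch) ---------------------
// The forNumber(int) methods above map a wire value back to an enum constant
// and return null for numbers this proto version does not know about; the
// message getters (getType()/getMode()) surface such values as UNRECOGNIZED.
TableFieldSchema.Type t = TableFieldSchema.Type.forNumber(2);        // Type.INT64
TableFieldSchema.Type unknown = TableFieldSchema.Type.forNumber(99); // null
// ----------------------------------------------------------------------------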
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_ = 0; + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type getType() { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + public static final int MODE_FIELD_NUMBER = 3; + private int mode_ = 0; + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode getMode() { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode result = + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode.forNumber(mode_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode.UNRECOGNIZED + : result; + } + + public static final int FIELDS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List fields_; + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getFieldsList() { + return fields_; + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + public static final int DESCRIPTION_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
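// ---- Editor's sketch (not part of the generated patch) ---------------------
// The repeated `fields` accessors above carry the nested schema when a field
// is a STRUCT; a hypothetical two-column nested schema looks like this. The
// addFields(Builder) overload is the standard generated form for repeated
// message fields, assumed here rather than shown in this hunk.
TableFieldSchema address =
    TableFieldSchema.newBuilder()
        .setName("address")
        .setType(TableFieldSchema.Type.STRUCT)
        .addFields(
            TableFieldSchema.newBuilder().setName("city").setType(TableFieldSchema.Type.STRING))
        .addFields(
            TableFieldSchema.newBuilder().setName("zip").setType(TableFieldSchema.Type.STRING))
        .build();
int children = address.getFieldsCount(); // 2
// ----------------------------------------------------------------------------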
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + if (mode_ + != com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode.MODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, mode_); + } + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(4, fields_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, description_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (mode_ + != com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode.MODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, mode_); + } + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, fields_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, description_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema other = + (com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) return false; + if (mode_ != other.mode_) return false; + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + mode_; + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
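// ---- Editor's sketch (not part of the generated patch) ---------------------
// Round trip through the parseFrom overloads above; toByteArray() comes from
// the protobuf runtime (AbstractMessageLite), not from this file. `ageField`
// refers to the message built in the earlier sketch.
byte[] wire = ageField.toByteArray();
TableFieldSchema parsed =
    TableFieldSchema.parseFrom(wire); // throws InvalidProtocolBufferException
boolean same = parsed.equals(ageField); // true: equals() compares all fields
// ----------------------------------------------------------------------------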
+   * A field in TableSchema
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.TableFieldSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.TableFieldSchema) + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.class, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = 0; + mode_ = 0; + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + description_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema build() { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema result = + new com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.mode_ = mode_; + } + if 
(((from_bitField0_ & 0x00000010) != 0)) { + result.description_ = description_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.mode_ != 0) { + setModeValue(other.getModeValue()); + } + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000008); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + mode_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 34 + case 50: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int type_ = 0; + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type getType() { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + private int mode_ = 0; + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * </pre>
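+     *
+     * <p>For illustration only, a repeated (array-like) STRING field could be
+     * declared as:
+     * <pre>{@code
+     * TableFieldSchema.newBuilder()
+     *     .setName("tags") // illustrative name
+     *     .setType(TableFieldSchema.Type.STRING)
+     *     .setMode(TableFieldSchema.Mode.REPEATED)
+     *     .build();
+     * }</pre>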
+     *
+     * <code>
+     * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL];
+     * </code>
+     *
+     * @return The enum numeric value on the wire for mode.
+     */
+    @java.lang.Override
+    public int getModeValue() {
+      return mode_;
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for mode to set. + * @return This builder for chaining. + */ + public Builder setModeValue(int value) { + mode_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode getMode() { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode result = + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode.forNumber(mode_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The mode to set. + * @return This builder for chaining. + */ + public Builder setMode(com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + mode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMode() { + bitField0_ = (bitField0_ & ~0x00000004); + mode_ = 0; + onChanged(); + return this; + } + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + fields_ = + new java.util.ArrayList( + fields_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * </pre>
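+     *
+     * <p>For illustration only (all names are made up), a STRUCT field with a
+     * single nested leaf could be assembled as:
+     * <pre>{@code
+     * TableFieldSchema address =
+     *     TableFieldSchema.newBuilder()
+     *         .setName("address")
+     *         .setType(TableFieldSchema.Type.STRUCT)
+     *         .addFields(
+     *             TableFieldSchema.newBuilder()
+     *                 .setName("city")
+     *                 .setType(TableFieldSchema.Type.STRING))
+     *         .build();
+     * }</pre>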
+     *
+     * <code>
+     * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL];
+     * </code>
+     */
+    public java.util.List<com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema>
+        getFieldsList() {
+      if (fieldsBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(fields_);
+      } else {
+        return fieldsBuilder_.getMessageList();
+      }
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllFields( + java.lang.Iterable + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + private java.lang.Object description_ = ""; + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * </pre>
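+     *
+     * <p>For illustration only (name and text are made up), a description is
+     * attached the same way as the other field properties:
+     * <pre>{@code
+     * TableFieldSchema.newBuilder()
+     *     .setName("customer_id")
+     *     .setType(TableFieldSchema.Type.STRING)
+     *     .setDescription("Unique customer identifier")
+     *     .build();
+     * }</pre>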
+     *
+     * <code>string description = 6 [(.google.api.field_behavior) = OPTIONAL];</code>
+     *
+     * @return The description.
+     */
+    public java.lang.String getDescription() {
+      java.lang.Object ref = description_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        description_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.TableFieldSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.TableFieldSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableFieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java new file mode 100644 index 000000000000..82429015a2dc --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java @@ -0,0 +1,208 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface TableFieldSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.TableFieldSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Required. The field name. The name must contain only letters (a-z, A-Z),
+   * numbers (0-9), or underscores (_), and must start with a letter or
+   * underscore. The maximum length is 128 characters.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
+   * Required. The field data type.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Type getType(); + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + int getModeValue(); + + /** + * + * + *
+   * Optional. The field mode. The default value is NULLABLE.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Mode getMode(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getFieldsList(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getFields(int index); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
+   * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder getFieldsOrBuilder(int index); + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
+   * Optional. The field description. The maximum length is 1,024 characters.
+   * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java new file mode 100644 index 000000000000..84c1c7f92e33 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java @@ -0,0 +1,217 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class TableName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/datasets/{dataset}/tables/{table}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + + @Deprecated + protected TableName() { + project = null; + dataset = null; + table = null; + } + + private TableName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static TableName of(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build(); + } + + public static String format(String project, String dataset, String table) { + return newBuilder().setProject(project).setDataset(dataset).setTable(table).build().toString(); + } + + public static TableName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE.validatedMatch( + formattedString, "TableName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("dataset"), matchMap.get("table")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String 
formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (TableName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_DATASET_TABLE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE.instantiate( + "project", project, "dataset", dataset, "table", table); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + TableName that = ((TableName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + private Builder(TableName tableName) { + this.project = tableName.project; + this.dataset = tableName.dataset; + this.table = tableName.table; + } + + public TableName build() { + return new TableName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java new file mode 100644 index 000000000000..50f020e37672 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java @@ -0,0 +1,122 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public final class TableProto { + private TableProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "1google/cloud/bigquery/storage/v1beta2/table.proto\022%google.cloud.bigquery.stora" + + "ge.v1beta2\032\037google/api/field_behavior.proto\"V\n" + + "\013TableSchema\022G\n" + + "\006fields\030\001 \003(\01327.goo" + + "gle.cloud.bigquery.storage.v1beta2.TableFieldSchema\"\317\004\n" + + "\020TableFieldSchema\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022O\n" + + "\004type\030\002 \001(\0162<.google.cloud" + + ".bigquery.storage.v1beta2.TableFieldSchema.TypeB\003\340A\002\022O\n" + + "\004mode\030\003 \001(\0162<.google.clou" + + "d.bigquery.storage.v1beta2.TableFieldSchema.ModeB\003\340A\001\022L\n" + + "\006fields\030\004 \003(\01327.google.c" + + "loud.bigquery.storage.v1beta2.TableFieldSchemaB\003\340A\001\022\030\n" + + "\013description\030\006 \001(\tB\003\340A\001\"\325\001\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\n\n" + + "\006STRING\020\001\022\t\n" + + "\005INT64\020\002\022\n\n" + + "\006DOUBLE\020\003\022\n\n" + + "\006STRUCT\020\004\022\t\n" + + "\005BYTES\020\005\022\010\n" + + "\004BOOL\020\006\022\r\n" + + "\tTIMESTAMP\020\007\022\010\n" + + "\004DATE\020\010\022\010\n" + + "\004TIME\020\t\022\014\n" + + "\010DATETIME\020\n" + + "\022\r\n" + + "\tGEOGRAPHY\020\013\022\013\n" + + "\007NUMERIC\020\014\022\016\n\n" + + "BIGNUMERIC\020\r" + + "\022\014\n" + + "\010INTERVAL\020\016\022\010\n" + + "\004JSON\020\017\"F\n" + + "\004Mode\022\024\n" + + "\020MODE_UNSPECIFIED\020\000\022\014\n" + + "\010NULLABLE\020\001\022\014\n" + + "\010REQUIRED\020\002\022\014\n" + + "\010REPEATED\020\003B~\n" + + ")com.google.cloud.bigquery.storage.v1beta2B\n" + + "TableProtoP\001ZCcloud.google.com/go/bigquery/storage/apiv1beta2/storag" + + 
"epb;storagepbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_descriptor, + new java.lang.String[] { + "Fields", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_TableFieldSchema_descriptor, + new java.lang.String[] { + "Name", "Type", "Mode", "Fields", "Description", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java new file mode 100644 index 000000000000..3d94059bbd02 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java @@ -0,0 +1,981 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/table.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Schema of a table
+ * </pre>
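+ *
+ * <p>For illustration only (the field is made up), a one-field schema could be
+ * assembled from the accompanying TableFieldSchema builder:
+ * <pre>{@code
+ * TableSchema schema =
+ *     TableSchema.newBuilder()
+ *         .addFields(
+ *             TableFieldSchema.newBuilder()
+ *                 .setName("id")
+ *                 .setType(TableFieldSchema.Type.INT64)
+ *                 .setMode(TableFieldSchema.Mode.REQUIRED))
+ *         .build();
+ * }</pre>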
+ *
+ * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.TableSchema}
+ */
+public final class TableSchema extends com.google.protobuf.GeneratedMessageV3
+    implements
+    // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.TableSchema)
+    TableSchemaOrBuilder {
+  private static final long serialVersionUID = 0L;
+
+  // Use TableSchema.newBuilder() to construct.
+  private TableSchema(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  private TableSchema() {
+    fields_ = java.util.Collections.emptyList();
+  }
+
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new TableSchema();
+  }
+
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.bigquery.storage.v1beta2.TableProto
+        .internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.bigquery.storage.v1beta2.TableProto
+        .internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.bigquery.storage.v1beta2.TableSchema.class,
+            com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder.class);
+  }
+
+  public static final int FIELDS_FIELD_NUMBER = 1;
+
+  @SuppressWarnings("serial")
+  private java.util.List<com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema> fields_;
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * </pre>
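+   *
+   * <p>For illustration only ({@code schema} stands for any TableSchema
+   * instance), the top-level fields can be walked as:
+   * <pre>{@code
+   * for (TableFieldSchema field : schema.getFieldsList()) {
+   *   // e.g. inspect field.getName() and field.getType()
+   * }
+   * }</pre>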
+   *
+   * <code>repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1;</code>
+   */
+  @java.lang.Override
+  public java.util.List<com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema>
+      getFieldsList() {
+    return fields_;
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
+   * Describes the fields in a table.
+   * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(1, fields_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, fields_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.TableSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.TableSchema other = + (com.google.cloud.bigquery.storage.v1beta2.TableSchema) obj; + + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.TableSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Schema of a table
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.TableSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.TableSchema) + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.TableSchema.class, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.TableSchema.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.TableProto + .internal_static_google_cloud_bigquery_storage_v1beta2_TableSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchema build() { + com.google.cloud.bigquery.storage.v1beta2.TableSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.TableSchema result = + new com.google.cloud.bigquery.storage.v1beta2.TableSchema(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.bigquery.storage.v1beta2.TableSchema result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.TableSchema result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.TableSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.TableSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.TableSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance()) + return this; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema m = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.parser(), + extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList( + fields_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + 
fieldsBuilder_; + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder addFields(com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder addAllFields( + java.lang.Iterable + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder getFieldsBuilder( + int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder addFieldsBuilder( + int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.getDefaultInstance()); + } + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.TableSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.TableSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.TableSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.TableSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.TableSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java new file mode 100644 index 000000000000..365e47765d79 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java 
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1beta2/table.proto
+
+// Protobuf Java Version: 3.25.8
+package com.google.cloud.bigquery.storage.v1beta2;
+
+public interface TableSchemaOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.TableSchema)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1;</code>
+   */
+  java.util.List<com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema> getFieldsList();
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1;</code>
+   */
+  com.google.cloud.bigquery.storage.v1beta2.TableFieldSchema getFields(int index);
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1;</code>
+   */
+  int getFieldsCount();
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1;</code>
+   */
+  java.util.List<? extends com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder>
+      getFieldsOrBuilderList();
+
+  /**
+   *
+   *
+   * <pre>
+   * Describes the fields in a table.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.bigquery.storage.v1beta2.TableFieldSchema fields = 1;</code>
+   */
+  com.google.cloud.bigquery.storage.v1beta2.TableFieldSchemaOrBuilder getFieldsOrBuilder(int index);
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java
new file mode 100644
index 000000000000..0dade9034480
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java
@@ -0,0 +1,546 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1beta2/storage.proto
+
+// Protobuf Java Version: 3.25.8
+package com.google.cloud.bigquery.storage.v1beta2;
+
+/**
+ *
+ *
+ * <pre>
+ * Information on if the current connection is being throttled.
+ * </pre>
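+ *
+ * <p>A minimal usage sketch (illustrative only; not part of the generated API surface):
+ *
+ * <pre>{@code
+ * ThrottleState state = ThrottleState.newBuilder().setThrottlePercent(50).build();
+ * if (state.getThrottlePercent() > 0) {
+ *   // The server is throttling this connection; consider backing off proportionally.
+ * }
+ * }</pre>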
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ThrottleState} + */ +public final class ThrottleState extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ThrottleState) + ThrottleStateOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ThrottleState.newBuilder() to construct. + private ThrottleState(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ThrottleState() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ThrottleState(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.class, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder.class); + } + + public static final int THROTTLE_PERCENT_FIELD_NUMBER = 1; + private int throttlePercent_ = 0; + + /** + * + * + *
+   * How much this connection is being throttled. Zero means no throttling,
+   * 100 means fully throttled.
+   * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + @java.lang.Override + public int getThrottlePercent() { + return throttlePercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (throttlePercent_ != 0) { + output.writeInt32(1, throttlePercent_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (throttlePercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, throttlePercent_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ThrottleState)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ThrottleState other = + (com.google.cloud.bigquery.storage.v1beta2.ThrottleState) obj; + + if (getThrottlePercent() != other.getThrottlePercent()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + THROTTLE_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getThrottlePercent(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information on if the current connection is being throttled.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ThrottleState} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ThrottleState) + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.class, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ThrottleState.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + throttlePercent_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState build() { + com.google.cloud.bigquery.storage.v1beta2.ThrottleState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ThrottleState result = + new com.google.cloud.bigquery.storage.v1beta2.ThrottleState(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.ThrottleState result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.throttlePercent_ = throttlePercent_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ThrottleState) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ThrottleState) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ThrottleState other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance()) + return this; + if (other.getThrottlePercent() != 0) { + setThrottlePercent(other.getThrottlePercent()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + throttlePercent_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int throttlePercent_; + + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + @java.lang.Override + public int getThrottlePercent() { + return throttlePercent_; + } + + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @param value The throttlePercent to set. + * @return This builder for chaining. + */ + public Builder setThrottlePercent(int value) { + + throttlePercent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return This builder for chaining. + */ + public Builder clearThrottlePercent() { + bitField0_ = (bitField0_ & ~0x00000001); + throttlePercent_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ThrottleState) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ThrottleState) + private static final com.google.cloud.bigquery.storage.v1beta2.ThrottleState DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ThrottleState(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ThrottleState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java new file mode 100644 index 000000000000..68f0ac81498a --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java @@ -0,0 +1,40 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1beta2/storage.proto
+
+// Protobuf Java Version: 3.25.8
+package com.google.cloud.bigquery.storage.v1beta2;
+
+public interface ThrottleStateOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ThrottleState)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   *
+   * <pre>
+   * How much this connection is being throttled. Zero means no throttling,
+   * 100 means fully throttled.
+   * </pre>
+   *
+   * <code>int32 throttle_percent = 1;</code>
+   *
+   * @return The throttlePercent.
+   */
+  int getThrottlePercent();
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java
new file mode 100644
index 000000000000..379c5e39440d
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java
@@ -0,0 +1,2002 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/bigquery/storage/v1beta2/stream.proto
+
+// Protobuf Java Version: 3.25.8
+package com.google.cloud.bigquery.storage.v1beta2;
+
+/**
+ *
+ *
+ * <pre>
+ * Information about a single stream that gets data inside the storage system.
+ * </pre>
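+ *
+ * <p>A minimal inspection sketch (illustrative only; a real stream would come from the
+ * BigQueryWrite API rather than the default instance):
+ *
+ * <pre>{@code
+ * WriteStream stream = WriteStream.getDefaultInstance();
+ * if (stream.getType() == WriteStream.Type.COMMITTED) {
+ *   // Appended rows become visible as soon as each append is acknowledged.
+ * }
+ * }</pre>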
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.WriteStream} + */ +public final class WriteStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.WriteStream) + WriteStreamOrBuilder { + private static final long serialVersionUID = 0L; + + // Use WriteStream.newBuilder() to construct. + private WriteStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private WriteStream() { + name_ = ""; + type_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new WriteStream(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.WriteStream.class, + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder.class); + } + + /** + * + * + *
+   * Type enum of the stream.
+   * 
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.WriteStream.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Unknown type.
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * Data will commit automatically and appear as soon as the write is
+     * acknowledged.
+     * 
+ * + * COMMITTED = 1; + */ + COMMITTED(1), + /** + * + * + *
+     * Data is invisible until the stream is committed.
+     * 
+ * + * PENDING = 2; + */ + PENDING(2), + /** + * + * + *
+     * Data is only visible up to the offset to which it was flushed.
+     * 
+ * + * BUFFERED = 3; + */ + BUFFERED(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Unknown type.
+     * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
+     * Data will commit automatically and appear as soon as the write is
+     * acknowledged.
+     * 
+ * + * COMMITTED = 1; + */ + public static final int COMMITTED_VALUE = 1; + + /** + * + * + *
+     * Data is invisible until the stream is committed.
+     * 
+ * + * PENDING = 2; + */ + public static final int PENDING_VALUE = 2; + + /** + * + * + *
+     * Data is only visible up to the offset to which it was flushed.
+     * 
+ * + * BUFFERED = 3; + */ + public static final int BUFFERED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return COMMITTED; + case 2: + return PENDING; + case 3: + return BUFFERED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.WriteStream.Type) + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
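+   *
+   * <p>For example, the default stream of a table (project, dataset, and table IDs here are
+   * hypothetical):
+   *
+   * <pre>{@code
+   * projects/my-project/datasets/my_dataset/tables/my_table/streams/_default
+   * }</pre>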
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_ = 0; + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type getType() { + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type.UNRECOGNIZED + : result; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp commitTime_; + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+   * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+   * means it is not committed.
+   * 
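+   *
+   * <p>A minimal check sketch (illustrative only; {@code stream} is assumed to be a
+   * {@code WriteStream} obtained from the service):
+   *
+   * <pre>{@code
+   * if (stream.getType() == WriteStream.Type.PENDING && !stream.hasCommitTime()) {
+   *   // The pending stream has not been committed; its rows are not yet visible.
+   * }
+   * }</pre>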
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+   * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+   * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + public static final int TABLE_SCHEMA_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1beta2.TableSchema tableSchema_; + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * `CreateWriteStream` response. Caller should generate data that's
+   * compatible with this schema to send in initial `AppendRowsRequest`.
+   * The table schema could go out of date during the life time of the stream.
+   * 
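+   *
+   * <p>A minimal reading sketch (illustrative only; {@code stream} is assumed to be the
+   * {@code CreateWriteStream} response):
+   *
+   * <pre>{@code
+   * if (stream.hasTableSchema()) {
+   *   for (TableFieldSchema field : stream.getTableSchema().getFieldsList()) {
+   *     // Generate rows compatible with field.getName() and field.getType().
+   *   }
+   * }
+   * }</pre>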
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + @java.lang.Override + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * `CreateWriteStream` response. Caller should generate data that's
+   * compatible with this schema to send in initial `AppendRowsRequest`.
+   * The table schema could go out of date during the life time of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchema getTableSchema() { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : tableSchema_; + } + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * `CreateWriteStream` response. Caller should generate data that's
+   * compatible with this schema to send in initial `AppendRowsRequest`.
+   * The table schema could go out of date during the life time of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : tableSchema_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getCommitTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getTableSchema()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCommitTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getTableSchema()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.WriteStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.WriteStream other = + (com.google.cloud.bigquery.storage.v1beta2.WriteStream) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (hasTableSchema() != other.hasTableSchema()) return false; + if (hasTableSchema()) { + if (!getTableSchema().equals(other.getTableSchema())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + 
getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public 
static com.google.cloud.bigquery.storage.v1beta2.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.WriteStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Information about a single stream that gets data inside the storage system.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.WriteStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.WriteStream) + com.google.cloud.bigquery.storage.v1beta2.WriteStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.WriteStream.class, + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.WriteStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCreateTimeFieldBuilder(); + getCommitTimeFieldBuilder(); + getTableSchemaFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = 0; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + tableSchema_ = null; + if (tableSchemaBuilder_ != null) { + tableSchemaBuilder_.dispose(); + tableSchemaBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_WriteStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream build() { + com.google.cloud.bigquery.storage.v1beta2.WriteStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.WriteStream result = + new com.google.cloud.bigquery.storage.v1beta2.WriteStream(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.bigquery.storage.v1beta2.WriteStream result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? 
createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.commitTime_ = commitTimeBuilder_ == null ? commitTime_ : commitTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.tableSchema_ = + tableSchemaBuilder_ == null ? tableSchema_ : tableSchemaBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.WriteStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.WriteStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.WriteStream other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.WriteStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + input.readMessage(getCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(getCommitTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage(getTableSchemaFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + 
break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
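+     * For example (an illustrative name only):
+     * `projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream`.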
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int type_ = 0; + + /** + * + * + *
+     * Immutable. Type of the stream.
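+     * The stream types include `PENDING`, `COMMITTED`, and `BUFFERED`; see the
+     * Type enum on this message for the authoritative list.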
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type getType() { + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type.forNumber(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+     * Immutable. Type of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
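+     * Callers can check hasCommitTime() before reading this value to
+     * distinguish an unset commit time from an explicitly set one.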
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + } else { + commitTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && commitTime_ != null + && commitTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimeBuilder().mergeFrom(value); + } else { + commitTime_ = value; + } + } else { + commitTimeBuilder_.mergeFrom(value); + } + if (commitTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCommitTime() { + bitField0_ = (bitField0_ & ~0x00000008); + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.TableSchema tableSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableSchema, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder> + tableSchemaBuilder_; + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
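+     * (The `ProtoSchema` sent in the first `AppendRowsRequest` is typically
+     * generated from this schema.)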
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema(com.google.cloud.bigquery.storage.v1beta2.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + } else { + tableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema( + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeTableSchema(com.google.cloud.bigquery.storage.v1beta2.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && tableSchema_ != null + && tableSchema_ + != com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance()) { + getTableSchemaBuilder().mergeFrom(value); + } else { + tableSchema_ = value; + } + } else { + tableSchemaBuilder_.mergeFrom(value); + } + if (tableSchema_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearTableSchema() { + bitField0_ = (bitField0_ & ~0x00000010); + tableSchema_ = null; + if (tableSchemaBuilder_ != null) { + tableSchemaBuilder_.dispose(); + tableSchemaBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder getTableSchemaBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder + getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1beta2.TableSchema.getDefaultInstance() + : tableSchema_; + } + } + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableSchema, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.TableSchema, + com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder>( + getTableSchema(), getParentForChildren(), isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.WriteStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.WriteStream) + private static final com.google.cloud.bigquery.storage.v1beta2.WriteStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.WriteStream(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.WriteStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.WriteStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java new file mode 100644 index 000000000000..3cce937c8a6d --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java @@ -0,0 +1,257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class WriteStreamName implements ResourceName { + private static final PathTemplate PROJECT_DATASET_TABLE_STREAM = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"); + private volatile Map fieldValuesMap; + private final String project; + private final String dataset; + private final String table; + private final String stream; + + @Deprecated + protected WriteStreamName() { + project = null; + dataset = null; + table = null; + stream = null; + } + + private WriteStreamName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + dataset = Preconditions.checkNotNull(builder.getDataset()); + table = Preconditions.checkNotNull(builder.getTable()); + stream = Preconditions.checkNotNull(builder.getStream()); + } + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public String getStream() { + return stream; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static WriteStreamName of(String project, String dataset, String table, String stream) { + return newBuilder() + .setProject(project) + .setDataset(dataset) + .setTable(table) + .setStream(stream) + .build(); + } + + public static String format(String project, String dataset, String table, String stream) { + return newBuilder() + .setProject(project) + .setDataset(dataset) + .setTable(table) + .setStream(stream) + .build() + .toString(); + } + + public static WriteStreamName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_DATASET_TABLE_STREAM.validatedMatch( + formattedString, "WriteStreamName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("dataset"), + matchMap.get("table"), + matchMap.get("stream")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (WriteStreamName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return 
PROJECT_DATASET_TABLE_STREAM.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (dataset != null) { + fieldMapBuilder.put("dataset", dataset); + } + if (table != null) { + fieldMapBuilder.put("table", table); + } + if (stream != null) { + fieldMapBuilder.put("stream", stream); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_DATASET_TABLE_STREAM.instantiate( + "project", project, "dataset", dataset, "table", table, "stream", stream); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + WriteStreamName that = ((WriteStreamName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.dataset, that.dataset) + && Objects.equals(this.table, that.table) + && Objects.equals(this.stream, that.stream); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(dataset); + h *= 1000003; + h ^= Objects.hashCode(table); + h *= 1000003; + h ^= Objects.hashCode(stream); + return h; + } + + /** Builder for projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}. */ + public static class Builder { + private String project; + private String dataset; + private String table; + private String stream; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getDataset() { + return dataset; + } + + public String getTable() { + return table; + } + + public String getStream() { + return stream; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setDataset(String dataset) { + this.dataset = dataset; + return this; + } + + public Builder setTable(String table) { + this.table = table; + return this; + } + + public Builder setStream(String stream) { + this.stream = stream; + return this; + } + + private Builder(WriteStreamName writeStreamName) { + this.project = writeStreamName.project; + this.dataset = writeStreamName.dataset; + this.table = writeStreamName.table; + this.stream = writeStreamName.stream; + } + + public WriteStreamName build() { + return new WriteStreamName(this); + } + } +} diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java new file mode 100644 index 000000000000..5249ae133678 --- /dev/null +++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java @@ -0,0 +1,228 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +// Protobuf Java Version: 3.25.8 +package com.google.cloud.bigquery.storage.v1beta2; + +public interface WriteStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.WriteStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
+   * Immutable. Type of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1beta2.WriteStream.Type getType(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
+   * Output only. Create time of the stream. For the _default stream, this is the
+   * creation_time of the table.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+   * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+   * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + + /** + * + * + *
+   * Output only. Commit time of the stream.
+   * If a stream is of `COMMITTED` type, then it will have a commit_time the same as
+   * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+   * means it is not committed.
+   * 
+ * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * `CreateWriteStream` response. Caller should generate data that's
+   * compatible with this schema to send in initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + boolean hasTableSchema(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * `CreateWriteStream` response. Caller should generate data that's
+   * compatible with this schema to send in initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.TableSchema getTableSchema(); + + /** + * + * + *
+   * Output only. The schema of the destination table. It is only returned in
+   * `CreateWriteStream` response. Caller should generate data that's
+   * compatible with this schema to send in initial `AppendRowsRequest`.
+   * The table schema could go out of date during the lifetime of the stream.
+   * 
+   *
+   *
+   * .google.cloud.bigquery.storage.v1beta2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   */
+  com.google.cloud.bigquery.storage.v1beta2.TableSchemaOrBuilder getTableSchemaOrBuilder();
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto
new file mode 100644
index 000000000000..7d17d559e244
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto
@@ -0,0 +1,57 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta2;
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb";
+option java_multiple_files = true;
+option java_outer_classname = "ArrowProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta2";
+
+// Arrow schema as specified in
+// https://arrow.apache.org/docs/python/api/datatypes.html
+// and serialized to bytes using IPC:
+// https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+//
+// See code samples on how this message can be deserialized.
+message ArrowSchema {
+  // IPC-serialized Arrow schema.
+  bytes serialized_schema = 1;
+}
+
+// Arrow RecordBatch.
+message ArrowRecordBatch {
+  // IPC-serialized Arrow RecordBatch.
+  bytes serialized_record_batch = 1;
+}
+
+// Contains options specific to Arrow Serialization.
+message ArrowSerializationOptions {
+  // The IPC format to use when serializing Arrow streams.
+  enum Format {
+    // If unspecified, the IPC format as of the 0.15 release will be used.
+    FORMAT_UNSPECIFIED = 0;
+
+    // Use the legacy IPC message format as of Apache Arrow Release 0.14.
+    ARROW_0_14 = 1;
+
+    // Use the message format as of Apache Arrow Release 0.15.
+    ARROW_0_15 = 2;
+  }
+
+  // The Arrow IPC format to use.
+  Format format = 1;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto
new file mode 100644
index 000000000000..bd48a5cd0d80
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto
@@ -0,0 +1,35 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta2;
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb";
+option java_multiple_files = true;
+option java_outer_classname = "AvroProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta2";
+
+// Avro schema.
+message AvroSchema {
+  // JSON-serialized schema, as described at
+  // https://avro.apache.org/docs/1.8.1/spec.html.
+  string schema = 1;
+}
+
+// Avro rows.
+message AvroRows {
+  // Binary serialized rows in a block.
+  bytes serialized_binary_rows = 1;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto
new file mode 100644
index 000000000000..cdc77e7e6618
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto
@@ -0,0 +1,40 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta2;
+
+import "google/protobuf/descriptor.proto";
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb";
+option java_multiple_files = true;
+option java_outer_classname = "ProtoBufProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta2";
+
+// ProtoSchema describes the schema of the serialized protocol buffer data rows.
+message ProtoSchema {
+  // Descriptor for input message. The descriptor has to be self-contained,
+  // including all the nested types, except for protocol buffer well-known types
+  // (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
+  google.protobuf.DescriptorProto proto_descriptor = 1;
+}
+
+message ProtoRows {
+  // A sequence of rows serialized as a Protocol Buffer.
+  //
+  // See https://developers.google.com/protocol-buffers/docs/overview for more
+  // information on deserializing this field.
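+  //
+  // A minimal Java sketch of reading this field (RowMessage stands in for a
+  // hypothetical caller-defined protobuf message class; getSerializedRowsList()
+  // is the standard generated accessor for a `repeated bytes` field):
+  //
+  //   for (com.google.protobuf.ByteString bytes : protoRows.getSerializedRowsList()) {
+  //     RowMessage row = RowMessage.parseFrom(bytes);  // deserialize one row
+  //   }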
+  repeated bytes serialized_rows = 1;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto
new file mode 100644
index 000000000000..35fb37a8202c
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto
@@ -0,0 +1,577 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/storage/v1beta2/arrow.proto";
+import "google/cloud/bigquery/storage/v1beta2/avro.proto";
+import "google/cloud/bigquery/storage/v1beta2/protobuf.proto";
+import "google/cloud/bigquery/storage/v1beta2/stream.proto";
+import "google/cloud/bigquery/storage/v1beta2/table.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb";
+option java_multiple_files = true;
+option java_outer_classname = "StorageProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta2";
+
+// BigQuery Read API.
+//
+// The Read API can be used to read data from BigQuery.
+//
+// New code should use the v1 Read API going forward, unless it also uses the
+// Write API at the same time.
+service BigQueryRead {
+  option (google.api.default_host) = "bigquerystorage.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates a new read session. A read session divides the contents of a
+  // BigQuery table into one or more streams, which can then be used to read
+  // data from the table. The read session also specifies properties of the
+  // data to be read, such as a list of columns or a push-down filter describing
+  // the rows to be returned.
+  //
+  // A particular row can be read by at most one stream. When the caller has
+  // reached the end of each stream in the session, then all the data in the
+  // table has been read.
+  //
+  // Data is assigned to each stream such that roughly the same number of
+  // rows can be read from each stream. Because the server-side unit for
+  // assigning data is collections of rows, the API does not guarantee that
+  // each stream will return the same number of rows. Additionally, the
+  // limits are enforced based on the number of pre-filtered rows, so some
+  // filters can lead to lopsided assignments.
+  //
+  // Read sessions automatically expire 6 hours after they are created and do
+  // not require manual clean-up by the caller.
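+  //
+  // A minimal Java sketch of calling CreateReadSession (assuming the generated
+  // v1beta2 client classes in this library; "my-project", "my_dataset", and
+  // "my_table" are illustrative placeholders):
+  //
+  //   try (BigQueryReadClient client = BigQueryReadClient.create()) {
+  //     ReadSession session = client.createReadSession(
+  //         CreateReadSessionRequest.newBuilder()
+  //             .setParent("projects/my-project")
+  //             .setReadSession(ReadSession.newBuilder()
+  //                 .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+  //                 .setDataFormat(DataFormat.AVRO))
+  //             .setMaxStreamCount(1)
+  //             .build());
+  //   }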
+  rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) {
+    option (google.api.http) = {
+      post: "/v1beta2/{read_session.table=projects/*/datasets/*/tables/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) =
+        "parent,read_session,max_stream_count";
+  }
+
+  // Reads rows from the stream in the format prescribed by the ReadSession.
+  // Each response contains one or more table rows, up to a maximum of 100 MiB
+  // per response; read requests which attempt to read individual rows larger
+  // than 100 MiB will fail.
+  //
+  // Each request also returns a set of stream statistics reflecting the current
+  // state of the stream.
+  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta2/{read_stream=projects/*/locations/*/sessions/*/streams/*}"
+    };
+    option (google.api.method_signature) = "read_stream,offset";
+  }
+
+  // Splits a given `ReadStream` into two `ReadStream` objects. These
+  // `ReadStream` objects are referred to as the primary and the residual
+  // streams of the split. The original `ReadStream` can still be read from in
+  // the same manner as before. Both of the returned `ReadStream` objects can
+  // also be read from, and the rows returned by both child streams will be
+  // the same as the rows read from the original stream.
+  //
+  // Moreover, the two child streams will be allocated back-to-back in the
+  // original `ReadStream`. Concretely, it is guaranteed that for streams
+  // original, primary, and residual, that original[0-j] = primary[0-j] and
+  // original[j-n] = residual[0-m] once the streams have been read to
+  // completion.
+  rpc SplitReadStream(SplitReadStreamRequest)
+      returns (SplitReadStreamResponse) {
+    option (google.api.http) = {
+      get: "/v1beta2/{name=projects/*/locations/*/sessions/*/streams/*}"
+    };
+  }
+}
+
+// BigQuery Write API.
+//
+// The Write API can be used to write data to BigQuery.
+//
+//
+// The [google.cloud.bigquery.storage.v1
+// API](/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1)
+// should be used instead of the v1beta2 API for BigQueryWrite operations.
+service BigQueryWrite {
+  option deprecated = true;
+  option (google.api.default_host) = "bigquerystorage.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/bigquery.insertdata,"
+      "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates a write stream to the given table.
+  // Additionally, every table has a special COMMITTED stream named '_default'
+  // to which data can be written. This stream doesn't need to be created using
+  // CreateWriteStream. It is a stream that can be used simultaneously by any
+  // number of clients. Data written to this stream is considered committed as
+  // soon as an acknowledgement is received.
+  rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
+    option deprecated = true;
+    option (google.api.http) = {
+      post: "/v1beta2/{parent=projects/*/datasets/*/tables/*}"
+      body: "write_stream"
+    };
+    option (google.api.method_signature) = "parent,write_stream";
+  }
+
+  // Appends data to the given stream.
+  //
+  // If `offset` is specified, the `offset` is checked against the end of the
+  // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+  // attempt is made to append to an offset beyond the current end of the
+  // stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
+  // already been written to.
+  // The user can retry with an adjusted offset within the same RPC stream. If
+  // `offset` is not specified, the append happens at the end of the stream.
+  //
+  // The response contains the offset at which the append happened. Responses
+  // are received in the same order in which requests are sent. There will be
+  // one response for each successful request. If the `offset` is not set in
+  // the response, it means the append did not happen due to an error. If one
+  // request fails, all the subsequent requests will also fail until a
+  // successful request is made again.
+  //
+  // If the stream is of `PENDING` type, data will only be available for read
+  // operations after the stream is committed.
+  rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
+    option deprecated = true;
+    option (google.api.http) = {
+      post: "/v1beta2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "write_stream";
+  }
+
+  // Gets a write stream.
+  rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
+    option deprecated = true;
+    option (google.api.http) = {
+      post: "/v1beta2/{name=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Finalize a write stream so that no new data can be appended to the
+  // stream. Finalize is not supported on the '_default' stream.
+  rpc FinalizeWriteStream(FinalizeWriteStreamRequest)
+      returns (FinalizeWriteStreamResponse) {
+    option deprecated = true;
+    option (google.api.http) = {
+      post: "/v1beta2/{name=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Atomically commits a group of `PENDING` streams that belong to the same
+  // `parent` table.
+  // Streams must be finalized before commit and cannot be committed multiple
+  // times. Once a stream is committed, data in the stream becomes available
+  // for read operations.
+  rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest)
+      returns (BatchCommitWriteStreamsResponse) {
+    option deprecated = true;
+    option (google.api.http) = {
+      get: "/v1beta2/{parent=projects/*/datasets/*/tables/*}"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Flushes rows to a BUFFERED stream.
+  // If users are appending rows to a BUFFERED stream, a flush operation is
+  // required in order for the rows to become available for reading. A flush
+  // operation flushes a BUFFERED stream, from any previously flushed offset,
+  // up to the offset specified in the request.
+  // Flush is not supported on the _default stream, since it is not BUFFERED.
+  rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) {
+    option deprecated = true;
+    option (google.api.http) = {
+      post: "/v1beta2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+      body: "*"
+    };
+    option (google.api.method_signature) = "write_stream";
+  }
+}
+
+// Request message for `CreateReadSession`.
+message CreateReadSessionRequest {
+  // Required. The request project that owns the session, in the form of
+  // `projects/{project_id}`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Required. Session to be created.
+  ReadSession read_session = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Max initial number of streams.
+  // If unset or zero, the server will choose an appropriate number of streams
+  // so as to produce reasonable throughput. Must be non-negative. The number
+  // of streams may be lower than the requested number, depending on the
+  // amount of parallelism that is reasonable for the table. An error will be
+  // returned if the max count is greater than the current system max limit of
+  // 1,000.
+  //
+  // Streams must be read starting from offset 0.
+  int32 max_stream_count = 3;
+}
+
+// Request message for `ReadRows`.
+message ReadRowsRequest {
+  // Required. Stream to read rows from.
+  string read_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/ReadStream"
+    }
+  ];
+
+  // The offset requested must be less than the last row read from Read.
+  // Requesting a larger offset is undefined. If not specified, start reading
+  // from offset zero.
+  int64 offset = 2;
+}
+
+// Information on whether the current connection is being throttled.
+message ThrottleState {
+  // How much this connection is being throttled. Zero means no throttling,
+  // 100 means fully throttled.
+  int32 throttle_percent = 1;
+}
+
+// Estimated stream statistics for a given Stream.
+message StreamStats {
+  message Progress {
+    // The fraction of rows assigned to the stream that have been processed by
+    // the server so far, not including the rows in the current response
+    // message.
+    //
+    // This value, along with `at_response_end`, can be used to interpolate
+    // the progress made as the rows in the message are being processed using
+    // the following formula: `at_response_start + (at_response_end -
+    // at_response_start) * rows_processed_from_response / rows_in_response`.
+    //
+    // Note that if a filter is provided, the `at_response_end` value of the
+    // previous response may not necessarily be equal to the
+    // `at_response_start` value of the current response.
+    double at_response_start = 1;
+
+    // Similar to `at_response_start`, except that this value includes the
+    // rows in the current response.
+    double at_response_end = 2;
+  }
+
+  // Represents the progress of the current stream.
+  Progress progress = 2;
+}
+
+// A response from calling `ReadRows` may include row data, progress, and
+// throttling information.
+message ReadRowsResponse {
+  // Row data is returned in the format specified during session creation.
+  oneof rows {
+    // Serialized row data in AVRO format.
+    AvroRows avro_rows = 3;
+
+    // Serialized row data in Arrow RecordBatch format.
+    ArrowRecordBatch arrow_record_batch = 4;
+  }
+
+  // Number of serialized rows in the rows block.
+  int64 row_count = 6;
+
+  // Statistics for the stream.
+  StreamStats stats = 2;
+
+  // Throttling state. If unset, the latest response still describes
+  // the current throttling status.
+  ThrottleState throttle_state = 5;
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields. This schema is equivalent to the one returned by
+  // CreateReadSession. This field is only populated in the first
+  // ReadRowsResponse RPC.
+  oneof schema {
+    // Output only. Avro schema.
+    AvroSchema avro_schema = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Arrow schema.
+    ArrowSchema arrow_schema = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+}
+
+// Request message for `SplitReadStream`.
+message SplitReadStreamRequest {
+  // Required. Name of the stream to split.
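+  // For example (illustrative):
+  // `projects/{project}/locations/{location}/sessions/{session}/streams/{stream}`,
+  // following the `ReadStream` resource pattern defined in stream.proto.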
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/ReadStream"
+    }
+  ];
+
+  // A value in the range (0.0, 1.0) that specifies the fractional point at
+  // which the original stream should be split. The actual split point is
+  // evaluated on pre-filtered rows, so if a filter is provided, then there is
+  // no guarantee that the division of the rows between the new child streams
+  // will be proportional to this fractional value. Additionally, because the
+  // server-side unit for assigning data is collections of rows, this fraction
+  // will always map to a data storage boundary on the server side.
+  double fraction = 2;
+}
+
+// Response message for `SplitReadStream`.
+message SplitReadStreamResponse {
+  // Primary stream, which contains the beginning portion of
+  // |original_stream|. An empty value indicates that the original stream can
+  // no longer be split.
+  ReadStream primary_stream = 1;
+
+  // Remainder stream, which contains the tail of |original_stream|. An empty
+  // value indicates that the original stream can no longer be split.
+  ReadStream remainder_stream = 2;
+}
+
+// Request message for `CreateWriteStream`.
+message CreateWriteStreamRequest {
+  // Required. Reference to the table to which the stream belongs, in the
+  // format of `projects/{project}/datasets/{dataset}/tables/{table}`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+  ];
+
+  // Required. Stream to be created.
+  WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for `AppendRows`.
+message AppendRowsRequest {
+  // Proto schema and data.
+  message ProtoData {
+    // Proto schema used to serialize the data.
+    ProtoSchema writer_schema = 1;
+
+    // Serialized row data in protobuf message format.
+    ProtoRows rows = 2;
+  }
+
+  // Required. The stream that is the target of the append operation. This
+  // value must be specified for the initial request. If subsequent requests
+  // specify the stream name, it must equal the value provided in the first
+  // request. To write to the _default stream, populate this field with a
+  // string in the format
+  // `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+  string write_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+
+  // If present, the write is only performed if the next append offset is the
+  // same as the provided value. If not present, the write is performed at the
+  // current end of stream. Specifying a value for this field is not allowed
+  // when calling AppendRows for the '_default' stream.
+  google.protobuf.Int64Value offset = 2;
+
+  // Input rows. The `writer_schema` field must be specified in the initial
+  // request; currently, it will be ignored if specified in subsequent
+  // requests. Subsequent requests must carry data in the same format as the
+  // initial request.
+  oneof rows {
+    // Rows in proto format.
+    ProtoData proto_rows = 4;
+  }
+
+  // ID set by the client to annotate its identity. Only the setting in the
+  // initial request is respected.
+  string trace_id = 6;
+}
+
+// Response message for `AppendRows`.
+message AppendRowsResponse {
+  // AppendResult is returned for successful append requests.
+  message AppendResult {
+    // The row offset at which the last append occurred. The offset will not
+    // be set if appending using the default stream.
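+    // For example (illustrative): a writer that appends two 10-row batches to
+    // an explicitly created stream can check that the offsets in the two
+    // responses advance by 10, verifying that no batch was dropped.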
+    google.protobuf.Int64Value offset = 1;
+  }
+
+  oneof response {
+    // Result if the append is successful.
+    AppendResult append_result = 1;
+
+    // Error returned when problems were encountered. If present,
+    // it indicates rows were not accepted into the system.
+    // Users can retry or continue with other append requests within the
+    // same connection.
+    //
+    // Additional information about error signalling:
+    //
+    // ALREADY_EXISTS: Happens when an append specified an offset, and the
+    // backend already has received data at this offset. Typically encountered
+    // in retry scenarios, and can be ignored.
+    //
+    // OUT_OF_RANGE: Returned when the specified offset in the stream is
+    // beyond the current end of the stream.
+    //
+    // INVALID_ARGUMENT: Indicates a malformed request or data.
+    //
+    // ABORTED: Request processing is aborted because of prior failures. The
+    // request can be retried if the previous failure is addressed.
+    //
+    // INTERNAL: Indicates server side error(s) that can be retried.
+    google.rpc.Status error = 2;
+  }
+
+  // If the backend detects a schema update, it is passed back to the user so
+  // that the user can start sending messages of the new type. This field is
+  // empty when no schema updates have occurred.
+  TableSchema updated_schema = 3;
+}
+
+// Request message for `GetWriteStream`.
+message GetWriteStreamRequest {
+  // Required. Name of the stream to get, in the form of
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+}
+
+// Request message for `BatchCommitWriteStreams`.
+message BatchCommitWriteStreamsRequest {
+  // Required. Parent table that all the streams should belong to, in the form
+  // of `projects/{project}/datasets/{dataset}/tables/{table}`.
+  string parent = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The group of streams that will be committed atomically.
+  repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Response message for `BatchCommitWriteStreams`.
+message BatchCommitWriteStreamsResponse {
+  // The time at which streams were committed, with microsecond granularity.
+  // This field will only exist when there are no stream errors.
+  // **Note:** if this field is not set, it means the commit was not
+  // successful.
+  google.protobuf.Timestamp commit_time = 1;
+
+  // Stream-level error if the commit failed. Only streams with an error will
+  // be in the list.
+  // If empty, there is no error and all streams are committed successfully.
+  // If non-empty, certain streams have errors and zero streams are committed
+  // due to the atomicity guarantee.
+  repeated StorageError stream_errors = 2;
+}
+
+// Request message for invoking `FinalizeWriteStream`.
+message FinalizeWriteStreamRequest {
+  // Required. Name of the stream to finalize, in the form of
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+}
+
+// Response message for `FinalizeWriteStream`.
+message FinalizeWriteStreamResponse {
+  // Number of rows in the finalized stream.
+  int64 row_count = 1;
+}
+
+// Request message for `FlushRows`.
+message FlushRowsRequest {
+  // Required. The stream that is the target of the flush operation.
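+  // Only BUFFERED streams support flushing; illustratively, the name takes
+  // the form
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.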
+  string write_stream = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigquerystorage.googleapis.com/WriteStream"
+    }
+  ];
+
+  // Ending offset of the flush operation. Rows before this offset (including
+  // this offset) will be flushed.
+  google.protobuf.Int64Value offset = 2;
+}
+
+// Response message for `FlushRows`.
+message FlushRowsResponse {
+  // The rows before this offset (including this offset) are flushed.
+  int64 offset = 1;
+}
+
+// Structured custom BigQuery Storage error message. The error can be attached
+// as error details in the returned rpc Status. In particular, the use of error
+// codes allows more structured error handling, and reduces the need to
+// evaluate unstructured error text strings.
+message StorageError {
+  // Error code for `StorageError`.
+  enum StorageErrorCode {
+    // Default error.
+    STORAGE_ERROR_CODE_UNSPECIFIED = 0;
+
+    // Table is not found in the system.
+    TABLE_NOT_FOUND = 1;
+
+    // Stream is already committed.
+    STREAM_ALREADY_COMMITTED = 2;
+
+    // Stream is not found.
+    STREAM_NOT_FOUND = 3;
+
+    // Invalid Stream type.
+    // For example, you try to commit a stream that is not pending.
+    INVALID_STREAM_TYPE = 4;
+
+    // Invalid Stream state.
+    // For example, you try to commit a stream that is not finalized or has
+    // been garbage collected.
+    INVALID_STREAM_STATE = 5;
+
+    // Stream is finalized.
+    STREAM_FINALIZED = 6;
+  }
+
+  // BigQuery Storage specific error code.
+  StorageErrorCode code = 1;
+
+  // Name of the failed entity.
+  string entity = 2;
+
+  // Message that describes the error.
+  string error_message = 3;
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto
new file mode 100644
index 000000000000..c2d6d7b6a93a
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto
@@ -0,0 +1,191 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1beta2;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/storage/v1beta2/arrow.proto";
+import "google/cloud/bigquery/storage/v1beta2/avro.proto";
+import "google/cloud/bigquery/storage/v1beta2/table.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb";
+option java_multiple_files = true;
+option java_outer_classname = "StreamProto";
+option java_package = "com.google.cloud.bigquery.storage.v1beta2";
+option (google.api.resource_definition) = {
+  type: "bigquery.googleapis.com/Table"
+  pattern: "projects/{project}/datasets/{dataset}/tables/{table}"
+};
+
+// Data format for input or output data.
+enum DataFormat {
+  DATA_FORMAT_UNSPECIFIED = 0;
+
+  // Avro is a standard open source row-based file format.
+  // See https://avro.apache.org/ for more details.
+  AVRO = 1;
+
+  // Arrow is a standard open source column-based message format.
+  // See https://arrow.apache.org/ for more details.
+  ARROW = 2;
+}
+
+// Information about the ReadSession.
+message ReadSession {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/ReadSession"
+    pattern: "projects/{project}/locations/{location}/sessions/{session}"
+  };
+
+  // Additional attributes when reading a table.
+  message TableModifiers {
+    // The snapshot time of the table. If not set, interpreted as now.
+    google.protobuf.Timestamp snapshot_time = 1;
+  }
+
+  // Options dictating how we read a table.
+  message TableReadOptions {
+    // Names of the fields in the table that should be read. If empty, all
+    // fields will be read. If the specified field is a nested field, all
+    // the sub-fields in the field will be selected. The output field order is
+    // unrelated to the order of fields in selected_fields.
+    repeated string selected_fields = 1;
+
+    // SQL text filtering statement, similar to a WHERE clause in a query.
+    // Aggregates are not supported.
+    //
+    // Examples: "int_field > 5"
+    //           "date_field = CAST('2014-9-27' as DATE)"
+    //           "nullable_field is not NULL"
+    //           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+    //           "numeric_field BETWEEN 1.0 AND 5.0"
+    //
+    // Restricted to a maximum length of 1 MB.
+    string row_restriction = 2;
+
+    // Optional. Options specific to the Apache Arrow output format.
+    ArrowSerializationOptions arrow_serialization_options = 3
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Output only. Unique identifier for the session, in the form
+  // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time at which the session becomes invalid. After this time,
+  // subsequent requests to read this Session will return errors. The
+  // expire_time is automatically assigned and currently cannot be specified
+  // or updated.
+  google.protobuf.Timestamp expire_time = 2
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Immutable. Data format of the output data.
+  DataFormat data_format = 3 [(google.api.field_behavior) = IMMUTABLE];
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields.
+  oneof schema {
+    // Output only. Avro schema.
+    AvroSchema avro_schema = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Arrow schema.
+    ArrowSchema arrow_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Immutable. Table that this ReadSession is reading from, in the form
+  // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+  string table = 6 [
+    (google.api.field_behavior) = IMMUTABLE,
+    (google.api.resource_reference) = {
+      type: "bigquery.googleapis.com/Table"
+    }
+  ];
+
+  // Optional. Any modifiers which are applied when reading from the specified
+  // table.
+  TableModifiers table_modifiers = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Read options for this session (e.g. column selection, filters).
+  TableReadOptions read_options = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. A list of streams created with the session.
+  //
+  // At least one stream is created with the session.
+  // In the future, larger request_stream_count values *may* result in this
+  // list being unpopulated; in that case, the user will need to use a List
+  // method to get the streams instead, which is not yet available.
+  repeated ReadStream streams = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about a single stream that gets data out of the storage system.
+// Most of the information about `ReadStream` instances is aggregated, making
+// `ReadStream` lightweight.
+message ReadStream {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/ReadStream"
+    pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}"
+  };
+
+  // Output only. Name of the stream, in the form
+  // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about a single stream that gets data inside the storage system.
+message WriteStream {
+  option (google.api.resource) = {
+    type: "bigquerystorage.googleapis.com/WriteStream"
+    pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"
+  };
+
+  // Type enum of the stream.
+  enum Type {
+    // Unknown type.
+    TYPE_UNSPECIFIED = 0;
+
+    // Data will commit automatically and appear as soon as the write is
+    // acknowledged.
+    COMMITTED = 1;
+
+    // Data is invisible until the stream is committed.
+    PENDING = 2;
+
+    // Data is only visible up to the offset to which it was flushed.
+    BUFFERED = 3;
+  }
+
+  // Output only. Name of the stream, in the form
+  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Immutable. Type of the stream.
+  Type type = 2 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Output only. Create time of the stream. For the _default stream, this is
+  // the creation_time of the table.
+  google.protobuf.Timestamp create_time = 3
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Commit time of the stream.
+  // If a stream is of `COMMITTED` type, then it will have a commit_time equal
+  // to `create_time`. If the stream is of `PENDING` type, an empty commit_time
+  // means it is not committed.
+  google.protobuf.Timestamp commit_time = 4
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The schema of the destination table. It is only returned in
+  // the `CreateWriteStream` response. The caller should generate data that is
+  // compatible with this schema to send in the initial `AppendRowsRequest`.
+  // The table schema could go out of date during the lifetime of the stream.
+  TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto
new file mode 100644
index 000000000000..3dd27cf0f344
--- /dev/null
+++ b/java-bigquerystorage/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto
@@ -0,0 +1,111 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb;storagepb"; +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta2"; + +// Schema of a table +message TableSchema { + // Describes the fields in a table. + repeated TableFieldSchema fields = 1; +} + +// A field in TableSchema +message TableFieldSchema { + enum Type { + // Illegal value + TYPE_UNSPECIFIED = 0; + + // 64K, UTF8 + STRING = 1; + + // 64-bit signed + INT64 = 2; + + // 64-bit IEEE floating point + DOUBLE = 3; + + // Aggregate type + STRUCT = 4; + + // 64K, Binary + BYTES = 5; + + // 2-valued + BOOL = 6; + + // 64-bit signed usec since UTC epoch + TIMESTAMP = 7; + + // Civil date - Year, Month, Day + DATE = 8; + + // Civil time - Hour, Minute, Second, Microseconds + TIME = 9; + + // Combination of civil date and civil time + DATETIME = 10; + + // Geography object + GEOGRAPHY = 11; + + // Numeric value + NUMERIC = 12; + + // BigNumeric value + BIGNUMERIC = 13; + + // Interval + INTERVAL = 14; + + // JSON, String + JSON = 15; + } + + enum Mode { + // Illegal value + MODE_UNSPECIFIED = 0; + + NULLABLE = 1; + + REQUIRED = 2; + + REPEATED = 3; + } + + // Required. The field name. The name must contain only letters (a-z, A-Z), + // numbers (0-9), or underscores (_), and must start with a letter or + // underscore. The maximum length is 128 characters. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The field data type. + Type type = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The field mode. The default value is NULLABLE. + Mode mode = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Describes the nested schema fields if the type property is set to STRUCT. + repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The field description. The maximum length is 1,024 characters. 
+ string description = 6 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/java-bigquerystorage/samples/install-without-bom/pom.xml b/java-bigquerystorage/samples/install-without-bom/pom.xml new file mode 100644 index 000000000000..c7e67157bd21 --- /dev/null +++ b/java-bigquerystorage/samples/install-without-bom/pom.xml @@ -0,0 +1,152 @@ + + + 4.0.0 + com.google.cloud + bigquerystorage-install-without-bom + jar + Google BigQuery Storage Install Without Bom + https://github.com/googleapis/java-bigquerystorage + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 3.25.8 + 1.8 + 1.8 + UTF-8 + 15.0.2 + + + + + + + com.google.cloud + google-cloud-bigquerystorage + 3.17.2 + + + + + com.google.cloud + google-cloud-bigquery + 2.57.1 + + + org.apache.avro + avro + 1.11.4 + + + org.apache.arrow + arrow-vector + ${arrow.version} + + + org.apache.arrow + arrow-memory-netty + ${arrow.version} + + + org.apache.arrow + arrow-memory-core + ${arrow.version} + + + io.opentelemetry + opentelemetry-exporter-logging + 1.52.0 + + + com.google.cloud.opentelemetry + exporter-metrics + 0.36.0 + + + com.google.protobuf + protobuf-java-util + ${project.protobuf-java.version} + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.4 + test + + + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + ../snippets/src/main/proto + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${project.protobuf-java.version}:exe:${os.detected.classifier} + ../snippets/src/main/proto + + + + + compile + + + + + + + diff --git a/java-bigquerystorage/samples/pom.xml b/java-bigquerystorage/samples/pom.xml new file mode 100644 index 000000000000..3fa08e0b8769 --- /dev/null +++ b/java-bigquerystorage/samples/pom.xml @@ -0,0 +1,63 @@ + + + 4.0.0 + com.google.cloud + google-cloud-bigquerystorage-samples + 0.0.1-SNAPSHOT + pom + Google BigQuery Storage Samples Parent + https://github.com/googleapis/java-bigquerystorage + + Java idiomatic client for Google Cloud Platform services. 
+ + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + 1.8 + 1.8 + UTF-8 + + + + install-without-bom + snapshot + snippets + + + + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.4 + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.7.0 + + true + + + + org.codehaus.mojo + exec-maven-plugin + + true + + + + + diff --git a/java-bigquerystorage/samples/snapshot/pom.xml b/java-bigquerystorage/samples/snapshot/pom.xml new file mode 100644 index 000000000000..2e61c8190c5a --- /dev/null +++ b/java-bigquerystorage/samples/snapshot/pom.xml @@ -0,0 +1,148 @@ + + + 4.0.0 + com.google.cloud + bigquerystorage-snapshot + jar + Google BigQuery Storage Snapshot Samples + https://github.com/googleapis/java-bigquerystorage + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + 3.25.8 + 1.8 + 1.8 + UTF-8 + 17.0.0 + + + + + + com.google.cloud + google-cloud-bigquerystorage + 3.19.1 + + + + + com.google.cloud + google-cloud-bigquery + 2.57.1 + + + org.apache.avro + avro + 1.11.4 + + + + org.apache.arrow + arrow-vector + ${arrow.version} + + + org.apache.arrow + arrow-memory-netty + ${arrow.version} + + + + io.opentelemetry + opentelemetry-exporter-logging + 1.52.0 + + + com.google.cloud.opentelemetry + exporter-metrics + 0.36.0 + + + + com.google.protobuf + protobuf-java-util + ${project.protobuf-java.version} + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.4 + test + + + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + ../snippets/src/main/proto + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${project.protobuf-java.version}:exe:${os.detected.classifier} + ../snippets/src/main/proto + + + + + compile + + + + + + + diff --git a/java-bigquerystorage/samples/snippets/pom.xml b/java-bigquerystorage/samples/snippets/pom.xml new file mode 100644 index 000000000000..9d1318f91107 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/pom.xml @@ -0,0 +1,139 @@ + + + 4.0.0 + com.google.cloud + bigquerystorage-snippets + jar + Google BigQuery Storage Snippets + https://github.com/googleapis/java-bigquerystorage + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 3.25.8 + 1.8 + 1.8 + UTF-8 + 15.0.2 + + + + + + + + com.google.cloud + libraries-bom + 26.70.0 + pom + import + + + io.opentelemetry + opentelemetry-bom + 1.52.0 + pom + import + + + + + + + com.google.cloud + google-cloud-bigquerystorage + + + + + com.google.cloud + google-cloud-bigquery + 2.57.1 + + + org.apache.avro + avro + 1.11.4 + + + org.apache.arrow + arrow-vector + ${arrow.version} + + + org.apache.arrow + arrow-memory-netty + ${arrow.version} + + + org.apache.arrow + arrow-memory-core + ${arrow.version} + + + io.opentelemetry + opentelemetry-exporter-logging + + + com.google.cloud.opentelemetry + exporter-metrics + 0.36.0 + + + com.google.protobuf + protobuf-java-util + ${project.protobuf-java.version} + + + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.4 + test + + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${project.protobuf-java.version}:exe:${os.detected.classifier} + + + + + compile + + + + + + + diff --git 
a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java new file mode 100644 index 000000000000..0ef778df5cd2 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java @@ -0,0 +1,335 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_jsonstreamwriter_export] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.MaximumRequestCallbackWaitTimeExceededException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StorageException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StreamWriterClosedException; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class ExportOpenTelemetry { + + public static void runExportToOpenTelemetry() + throws DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + exportToOpenTelemetry(projectId, datasetName, tableName); + } + + private static ByteString buildByteString() { + byte[] bytes = new byte[] {1, 2, 3, 4, 5}; + return ByteString.copyFrom(bytes); + } + + // Create a JSON object that is compatible with the table schema. 
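+  // Note (assumption inferred from the fields populated below, not from a
+  // schema shipped with this change): the destination table is expected to
+  // have columns `test_string` (STRING), `test_bytes` (BYTES), and `test_geo`
+  // (GEOGRAPHY); adjust the field names and types to match your own table.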
+  private static JSONObject buildRecord(int i, int j) {
+    JSONObject record = new JSONObject();
+    StringBuilder sbSuffix = new StringBuilder();
+    for (int k = 0; k < j; k++) {
+      sbSuffix.append(k);
+    }
+    record.put("test_string", String.format("record %03d-%03d %s", i, j, sbSuffix.toString()));
+    ByteString byteString = buildByteString();
+    record.put("test_bytes", byteString);
+    record.put(
+        "test_geo",
+        "POLYGON((-124.49 47.35,-124.49 40.73,-116.49 40.73,-116.49 47.35,-124.49 47.35))");
+    return record;
+  }
+
+  public static void exportToOpenTelemetry(String projectId, String datasetName, String tableName)
+      throws DescriptorValidationException, InterruptedException, IOException {
+    TableName parentTable = TableName.of(projectId, datasetName, tableName);
+
+    DataWriter writer = new DataWriter();
+    // One-time initialization for the worker.
+    writer.initialize(parentTable);
+
+    // Write two batches of fake data to the stream, each with 10 JSON records. Data may be
+    // batched up to the maximum request size:
+    // https://cloud.google.com/bigquery/quotas#write-api-limits
+    for (int i = 0; i < 2; i++) {
+      JSONArray jsonArr = new JSONArray();
+      for (int j = 0; j < 10; j++) {
+        JSONObject record = buildRecord(i, j);
+        jsonArr.put(record);
+      }
+
+      writer.append(new AppendContext(jsonArr));
+    }
+
+    // Final cleanup for the stream during worker teardown.
+    writer.cleanup();
+    // Two batches of 10 records each were appended, so expect 20 rows.
+    verifyExpectedRowCount(parentTable, 20L);
+    System.out.println("Appended records successfully.");
+  }
+
+  private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount)
+      throws InterruptedException {
+    String queryRowCount =
+        "SELECT COUNT(*) FROM `"
+            + parentTable.getProject()
+            + "."
+            + parentTable.getDataset()
+            + "."
+            + parentTable.getTable()
+            + "`";
+    QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build();
+    BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
+    TableResult results = bigquery.query(queryConfig);
+    long countRowsActual =
+        Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue());
+    if (countRowsActual != expectedRowCount) {
+      throw new RuntimeException(
+          "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual);
+    }
+  }
+
+  private static class AppendContext {
+
+    JSONArray data;
+
+    AppendContext(JSONArray data) {
+      this.data = data;
+    }
+  }
+
+  private static class DataWriter {
+
+    private static final int MAX_RECREATE_COUNT = 3;
+
+    private BigQueryWriteClient client;
+
+    // Track the number of in-flight requests to wait for all responses before shutting down.
+    private final Phaser inflightRequestCount = new Phaser(1);
+    private final Object lock = new Object();
+    private JsonStreamWriter streamWriter;
+
+    @GuardedBy("lock")
+    private RuntimeException error = null;
+
+    private AtomicInteger recreateCount = new AtomicInteger(0);
+
+    private JsonStreamWriter createStreamWriter(String tableName)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      // Configure in-stream automatic retry settings.
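+      // With the settings below, retry delays grow by a factor of 1.1 from an
+      // initial 500 ms: roughly 500, 550, 605, and 666 ms between the five
+      // attempts (illustrative arithmetic, capped by the 1-minute maximum).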
+      // Error codes that are immediately retried:
+      // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+      // Error codes that are retried with exponential backoff:
+      // * RESOURCE_EXHAUSTED
+      RetrySettings retrySettings =
+          RetrySettings.newBuilder()
+              .setInitialRetryDelay(Duration.ofMillis(500))
+              .setRetryDelayMultiplier(1.1)
+              .setMaxAttempts(5)
+              .setMaxRetryDelay(Duration.ofMinutes(1))
+              .build();
+
+      // Use the JSON stream writer to send records in JSON format. Specify the table name to
+      // write to the default stream.
+      // For more information about JsonStreamWriter, see:
+      // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html
+      return JsonStreamWriter.newBuilder(tableName, client)
+          .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10)))
+          .setChannelProvider(
+              BigQueryWriteSettings.defaultGrpcTransportProviderBuilder()
+                  .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1))
+                  .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1))
+                  .setKeepAliveWithoutCalls(true)
+                  .setChannelsPerCpu(2)
+                  .build())
+          .setEnableConnectionPool(true)
+          .setEnableOpenTelemetry(true)
+          // If a value is missing in the JSON and the BigQuery column has a default value
+          // configured, apply the default value to the missing field.
+          .setDefaultMissingValueInterpretation(
+              AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+          .setRetrySettings(retrySettings)
+          .build();
+    }
+
+    public void initialize(TableName parentTable)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      // Initialize the client without settings; internally, the stream writer creates a new
+      // client with full settings.
+      client = BigQueryWriteClient.create();
+
+      streamWriter = createStreamWriter(parentTable.toString());
+    }
+
+    public void append(AppendContext appendContext)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      synchronized (this.lock) {
+        if (!streamWriter.isUserClosed()
+            && streamWriter.isClosed()
+            && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) {
+          streamWriter = createStreamWriter(streamWriter.getStreamName());
+          this.error = null;
+        }
+        // If earlier appends have failed and the writer was not recreated, surface the error
+        // instead of continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(appendContext.data);
+      ApiFutures.addCallback(
+          future, new AppendCompleteCallback(this, appendContext), MoreExecutors.directExecutor());
+
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup() {
+      // Wait for all in-flight requests to complete.
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      client.close();
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
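+      // (Any failure recorded by the append callbacks is rethrown here, so the
+      // worker fails loudly instead of silently dropping rows.)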
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+      private final AppendContext appendContext;
+
+      public AppendCompleteCallback(DataWriter parent, AppendContext appendContext) {
+        this.parent = parent;
+        this.appendContext = appendContext;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.format("Append success\n");
+        this.parent.recreateCount.set(0);
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        if (throwable instanceof AppendSerializationError) {
+          AppendSerializationError ase = (AppendSerializationError) throwable;
+          Map<Integer, String> rowIndexToErrorMessage = ase.getRowIndexToErrorMessage();
+          if (rowIndexToErrorMessage.size() > 0) {
+            // Omit the faulty rows.
+            JSONArray dataNew = new JSONArray();
+            for (int i = 0; i < appendContext.data.length(); i++) {
+              if (!rowIndexToErrorMessage.containsKey(i)) {
+                dataNew.put(appendContext.data.get(i));
+              } else {
+                // Process faulty rows by placing them on a dead-letter queue, for instance.
+              }
+            }
+
+            // Retry the remaining valid rows. Consider handing this off to a separate thread
+            // in production, to avoid potentially blocking while we are in a callback.
+            if (dataNew.length() > 0) {
+              try {
+                this.parent.append(new AppendContext(dataNew));
+              } catch (DescriptorValidationException e) {
+                throw new RuntimeException(e);
+              } catch (IOException e) {
+                throw new RuntimeException(e);
+              } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+              }
+            }
+            // Mark the existing attempt as done since we got a response for it.
+            done();
+            return;
+          }
+        }
+
+        boolean resendRequest = false;
+        if (throwable instanceof MaximumRequestCallbackWaitTimeExceededException) {
+          resendRequest = true;
+        } else if (throwable instanceof StreamWriterClosedException) {
+          if (!parent.streamWriter.isUserClosed()) {
+            resendRequest = true;
+          }
+        }
+        if (resendRequest) {
+          // Retry this request.
+          try {
+            this.parent.append(new AppendContext(appendContext.data));
+          } catch (DescriptorValidationException e) {
+            throw new RuntimeException(e);
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+          }
+          // Mark the existing attempt as done since we got a response for it.
+          done();
+          return;
+        }
+
+        synchronized (this.parent.lock) {
+          if (this.parent.error == null) {
+            StorageException storageException = Exceptions.toStorageException(throwable);
+            this.parent.error =
+                (storageException != null) ? storageException : new RuntimeException(throwable);
+          }
+        }
+        done();
+      }
+
+      private void done() {
+        // Reduce the count of in-flight requests.
+        this.parent.inflightRequestCount.arriveAndDeregister();
+      }
+    }
+  }
+}
+// [END bigquerystorage_jsonstreamwriter_export]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/JsonWriterStreamCdc.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/JsonWriterStreamCdc.java
new file mode 100644
index 000000000000..d49bcebd2135
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/JsonWriterStreamCdc.java
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryException; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableFieldSchema; +import com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.TableSchema; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class JsonWriterStreamCdc { + + private static final String CHANGE_TYPE_PSEUDO_COLUMN = "_change_type"; + + private static final String CREATE_TABLE_QUERY = + "CREATE TABLE `%s.%s` (\n" + + " Customer_ID INT64 PRIMARY KEY NOT ENFORCED,\n" + + " Customer_Enrollment_Date DATE,\n" + + " Customer_Name STRING,\n" + + " Customer_Address STRING,\n" + + " Customer_Tier STRING,\n" + + " Active_Subscriptions JSON)\n" + + "OPTIONS(max_staleness = INTERVAL 15 MINUTE);"; + + private static final String ALTER_TABLE_QUERY = + "ALTER TABLE `%s.%s`\n" + "SET OPTIONS (\n" + " max_staleness = INTERVAL 0 MINUTE);\n"; + + public static void main(String[] args) throws Exception { + // This sample follows the BigQuery change data capture (CDC) blog post that can be found at: + // https://cloud.google.com/blog/products/data-analytics/bigquery-gains-change-data-capture-functionality + if (args.length != 5) { + System.out.println( + "Arguments: project, dataset, table, new_customers_data_file, " + + "modified_customers_data_file"); + return; + } + + final String projectId = args[0]; + final String datasetName = args[1]; + final String tableName = args[2]; + final String newCustomersDataFile = args[3]; + final String modifiedCustomersDataFile = args[4]; + + // Creates a destination table with (max_staleness = INTERVAL 15 MINUTE). + createDestinationTable(datasetName, tableName); + + // Write new customer records to the destination table using UPSERT. + JSONArray newCustomersRecords = getRecordsFromDataFile(newCustomersDataFile); + writeToDefaultStream(projectId, datasetName, tableName, newCustomersRecords); + + // Alter the destination table so that (max_staleness = INTERVAL 0 MINUTE). + alterDestinationTable(datasetName, tableName); + + // Modify the customer records in the destination table using UPSERT. 
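+    // As with the new-customer records above, each line of the data file is
+    // expected to be a JSON object carrying the `_change_type` pseudo-column
+    // (e.g. UPSERT), consumed through the padded schema built in
+    // writeToDefaultStream below (an assumption based on this sample's schema,
+    // not on data files shipped with it).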
+    JSONArray modifiedCustomersRecords = getRecordsFromDataFile(modifiedCustomersDataFile);
+    writeToDefaultStream(projectId, datasetName, tableName, modifiedCustomersRecords);
+  }
+
+  public static void createDestinationTable(String datasetName, String tableName) {
+    query(String.format(CREATE_TABLE_QUERY, datasetName, tableName));
+  }
+
+  public static void alterDestinationTable(String datasetName, String tableName) {
+    query(String.format(ALTER_TABLE_QUERY, datasetName, tableName));
+  }
+
+  private static void query(String query) {
+    BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
+    QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(query).build();
+    try {
+      bigquery.query(queryConfig);
+    } catch (BigQueryException | InterruptedException e) {
+      System.out.println("Query did not run\n" + e.toString());
+    }
+  }
+
+  // writeToDefaultStream: Writes records from the source file to the destination table.
+  public static void writeToDefaultStream(
+      String projectId, String datasetName, String tableName, JSONArray data)
+      throws DescriptorValidationException, InterruptedException, IOException {
+    // Configure in-stream automatic retry settings.
+    // Error codes that are immediately retried:
+    // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+    // Error codes that are retried with exponential backoff:
+    // * RESOURCE_EXHAUSTED
+    RetrySettings retrySettings =
+        RetrySettings.newBuilder()
+            .setInitialRetryDelay(Duration.ofMillis(500))
+            .setRetryDelayMultiplier(1.1)
+            .setMaxAttempts(5)
+            .setMaxRetryDelay(Duration.ofMinutes(1))
+            .build();
+    // To use the UPSERT functionality, the table schema needs to be padded with an additional
+    // column "_change_type".
+    TableSchema tableSchema =
+        TableSchema.newBuilder()
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName("Customer_ID")
+                    .setType(TableFieldSchema.Type.INT64)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName("Customer_Enrollment_Date")
+                    .setType(TableFieldSchema.Type.DATE)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName("Customer_Name")
+                    .setType(TableFieldSchema.Type.STRING)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName("Customer_Address")
+                    .setType(TableFieldSchema.Type.STRING)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName("Customer_Tier")
+                    .setType(TableFieldSchema.Type.STRING)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName("Active_Subscriptions")
+                    .setType(TableFieldSchema.Type.JSON)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .addFields(
+                TableFieldSchema.newBuilder()
+                    .setName(CHANGE_TYPE_PSEUDO_COLUMN)
+                    .setType(TableFieldSchema.Type.STRING)
+                    .setMode(Mode.NULLABLE)
+                    .build())
+            .build();
+
+    // Use the JSON stream writer to send records in JSON format.
+    TableName parentTable = TableName.of(projectId, datasetName, tableName);
+    try (JsonStreamWriter writer =
+        JsonStreamWriter.newBuilder(parentTable.toString(), tableSchema)
+            .setRetrySettings(retrySettings)
+            .build()) {
+
+      ApiFuture<AppendRowsResponse> future = writer.append(data);
+      // The append method is asynchronous. Rather than waiting for the method to complete,
+      // which can hurt performance, register a completion callback and continue streaming.
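+      // Note: MoreExecutors.directExecutor() runs the callback on the thread that completes
+      // the future, so the callback below is deliberately lightweight (it only counts batches
+      // and prints).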
+      ApiFutures.addCallback(future, new AppendCompleteCallback(), MoreExecutors.directExecutor());
+    }
+  }
+
+  public static JSONArray getRecordsFromDataFile(String dataFile)
+      throws FileNotFoundException, IOException {
+    JSONArray result = new JSONArray();
+
+    // Read one JSON record per line, closing the reader when done.
+    try (BufferedReader reader = new BufferedReader(new FileReader(dataFile))) {
+      String line = reader.readLine();
+      while (line != null) {
+        JSONObject record = new JSONObject(line);
+        result.put(record);
+        line = reader.readLine();
+      }
+    }
+
+    return result;
+  }
+
+  static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+    private static final Object lock = new Object();
+    private static int batchCount = 0;
+
+    public void onSuccess(AppendRowsResponse response) {
+      synchronized (lock) {
+        if (response.hasError()) {
+          System.out.format("Error: %s\n", response.getError());
+        } else {
+          ++batchCount;
+          System.out.format("Wrote batch %d\n", batchCount);
+        }
+      }
+    }
+
+    public void onFailure(Throwable throwable) {
+      System.out.format("Error: %s\n", throwable.toString());
+    }
+  }
+}
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ParallelWriteCommittedStream.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ParallelWriteCommittedStream.java
new file mode 100644
index 000000000000..b22dc22e0748
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ParallelWriteCommittedStream.java
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_jsonstreamwriter_parallelcommitted] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BQTableSchemaToProtoDescriptor; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.JsonToProtoMessage; +import com.google.cloud.bigquery.storage.v1.ProtoRows; +import com.google.cloud.bigquery.storage.v1.ProtoSchema; +import com.google.cloud.bigquery.storage.v1.ProtoSchemaConverter; +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.Message; +import java.io.IOException; +import java.util.concurrent.ThreadLocalRandom; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class ParallelWriteCommittedStream { + + private static final Logger LOG = Logger.getLogger(ParallelWriteCommittedStream.class.getName()); + + // Total amount of test time. + private static final Duration TEST_TIME = Duration.ofSeconds(10); + + // How often to publish append stats. + private static final Duration METRICS_GAP = Duration.ofSeconds(5); + + // Size of each row to append. + private static final int ROW_SIZE = 1024; + + // The number of rows in each append request. + private static final long BATCH_SIZE = 10; + + // If true, switch to a new stream when append fails. + // If false, do not switch to a new stream. + private static final boolean SUPPORT_STREAM_SWITCH = false; + + @GuardedBy("this") + private long inflightCount = 0; + + @GuardedBy("this") + private long successCount = 0; + + @GuardedBy("this") + private long failureCount = 0; + + @GuardedBy("this") + private Throwable error = null; + + @GuardedBy("this") + private long lastMetricsTimeMillis = 0; + + @GuardedBy("this") + private long lastMetricsSuccessCount = 0; + + @GuardedBy("this") + private long lastMetricsFailureCount = 0; + + public void writeLoop( + String projectId, String datasetName, String tableName, BigQueryWriteClient client) { + LOG.info("Start writeLoop"); + long streamSwitchCount = 0; + long successRowCount = 0; + long failureRowCount = 0; + Throwable loggedError = null; + long deadlineMillis = System.currentTimeMillis() + TEST_TIME.toMillis(); + while (System.currentTimeMillis() < deadlineMillis) { + try { + WriteStream writeStream = createStream(projectId, datasetName, tableName, client); + writeToStream(client, writeStream, deadlineMillis); + } catch (Throwable e) { + LOG.warning("Unexpected error writing to stream: " + e.toString()); + } + waitForInflightToReachZero(Duration.ofMinutes(1)); + synchronized (this) { + successRowCount += successCount * BATCH_SIZE; + failureRowCount += failureCount * BATCH_SIZE; + if (loggedError == null) { + loggedError = error; + } + } + if (!SUPPORT_STREAM_SWITCH) { + // If stream switch is disabled, break. 
+ break; + } + LOG.info("Sleeping before switching stream."); + sleepIgnoringInterruption(Duration.ofMinutes(1)); + streamSwitchCount++; + } + LOG.info( + "Finish writeLoop. Success row count: " + + successRowCount + + " Failure row count: " + + failureRowCount + + " Logged error: " + + loggedError + + " Stream switch count: " + + streamSwitchCount); + if (successRowCount > 0 && failureRowCount == 0 && loggedError == null) { + System.out.println("All records are appended successfully."); + } + } + + private WriteStream createStream( + String projectId, String datasetName, String tableName, BigQueryWriteClient client) { + LOG.info("Creating a new stream"); + // Initialize a write stream for the specified table. + // For more information on WriteStream.Type, see: + // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/WriteStream.Type.html + WriteStream stream = WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build(); + TableName parentTable = TableName.of(projectId, datasetName, tableName); + CreateWriteStreamRequest createWriteStreamRequest = + CreateWriteStreamRequest.newBuilder() + .setParent(parentTable.toString()) + .setWriteStream(stream) + .build(); + return client.createWriteStream(createWriteStreamRequest); + } + + private void writeToStream( + BigQueryWriteClient client, WriteStream writeStream, long deadlineMillis) throws Throwable { + LOG.info("Start writing to new stream:" + writeStream.getName()); + synchronized (this) { + inflightCount = 0; + successCount = 0; + failureCount = 0; + error = null; + lastMetricsTimeMillis = System.currentTimeMillis(); + lastMetricsSuccessCount = 0; + lastMetricsFailureCount = 0; + } + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor( + writeStream.getTableSchema()); + ProtoSchema protoSchema = ProtoSchemaConverter.convert(descriptor); + try (StreamWriter writer = + StreamWriter.newBuilder(writeStream.getName()) + .setWriterSchema(protoSchema) + .setRetrySettings(retrySettings) + .setTraceId("SAMPLE:parallel_append") + .build()) { + while (System.currentTimeMillis() < deadlineMillis) { + synchronized (this) { + if (error != null) { + // Stop writing once we get an error. 
+            throw error;
+          }
+        }
+        ApiFuture<AppendRowsResponse> future = writer.append(createAppendRows(descriptor), -1);
+        synchronized (this) {
+          inflightCount++;
+        }
+        ApiFutures.addCallback(
+            future, new AppendCompleteCallback(this), MoreExecutors.directExecutor());
+      }
+    }
+  }
+
+  private void waitForInflightToReachZero(Duration timeout) {
+    LOG.info("Waiting for inflight count to reach zero.");
+    long deadlineMillis = System.currentTimeMillis() + timeout.toMillis();
+    while (System.currentTimeMillis() < deadlineMillis) {
+      synchronized (this) {
+        if (inflightCount == 0) {
+          LOG.info("Inflight count has reached zero.");
+          return;
+        }
+      }
+      sleepIgnoringInterruption(Duration.ofSeconds(1));
+    }
+    throw new RuntimeException("Timeout waiting for inflight count to reach 0");
+  }
+
+  private ProtoRows createAppendRows(Descriptor descriptor) {
+    ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder();
+    for (int i = 0; i < BATCH_SIZE; i++) {
+      byte[] payload = new byte[ROW_SIZE];
+      ThreadLocalRandom.current().nextBytes(payload);
+      JSONObject record = new JSONObject();
+      record.put("col1", new String(payload));
+      Message protoMessage = JsonToProtoMessage.convertJsonToProtoMessage(descriptor, record);
+      rowsBuilder.addSerializedRows(protoMessage.toByteString());
+    }
+    return rowsBuilder.build();
+  }
+
+  private void sleepIgnoringInterruption(Duration duration) {
+    try {
+      Thread.sleep(duration.toMillis());
+    } catch (InterruptedException e) {
+      LOG.warning("Sleep is interrupted.");
+    }
+  }
+
+  /*
+   * Callback when Append request is completed.
+   *
+   * It keeps track of count.
+   */
+  private class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+    private final ParallelWriteCommittedStream parent;
+
+    AppendCompleteCallback(ParallelWriteCommittedStream parent) {
+      this.parent = parent;
+    }
+
+    @Override
+    public void onSuccess(@Nullable AppendRowsResponse response) {
+      synchronized (parent) {
+        parent.inflightCount--;
+        if (!response.hasError()) {
+          parent.successCount++;
+        } else {
+          parent.failureCount++;
+        }
+        long nowMillis = System.currentTimeMillis();
+        if (nowMillis >= parent.lastMetricsTimeMillis + METRICS_GAP.toMillis()) {
+          long successCountInIteration = parent.successCount - parent.lastMetricsSuccessCount;
+          long failureCountInIteration = parent.failureCount - parent.lastMetricsFailureCount;
+          long metricsTimeMillis = nowMillis - parent.lastMetricsTimeMillis;
+          LOG.info(
+              "Success append: "
+                  + successCountInIteration
+                  + " failure append: "
+                  + failureCountInIteration
+                  + " in "
+                  + metricsTimeMillis
+                  + "ms. Successful MB Per Second: "
+                  + (double) (successCountInIteration * BATCH_SIZE * ROW_SIZE)
+                      / metricsTimeMillis
+                      / 1000
+                  + " Current inflight: "
+                  + parent.inflightCount);
+          parent.lastMetricsTimeMillis = System.currentTimeMillis();
+          parent.lastMetricsSuccessCount = parent.successCount;
+          parent.lastMetricsFailureCount = parent.failureCount;
+        }
+      }
+    }
+
+    @Override
+    public void onFailure(Throwable throwable) {
+      synchronized (parent) {
+        parent.inflightCount--;
+        parent.error = throwable;
+        LOG.warning("Found failure: " + throwable.toString());
+      }
+    }
+  }
+
+  public static void writeCommittedStream(String projectId, String datasetName, String tableName)
+      throws DescriptorValidationException, InterruptedException, IOException {
+    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+      new ParallelWriteCommittedStream().writeLoop(projectId, datasetName, tableName, client);
+    } catch (Exception e) {
+      System.out.println("Failed to append records. \n" + e.toString());
+    }
+  }
+}
+// [END bigquerystorage_jsonstreamwriter_parallelcommitted]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java
new file mode 100644
index 000000000000..829bbb31e9a5
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigquerystorage;
+
+// [START bigquerystorage_read_timestamp_arrow]
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigquery.storage.v1.ArrowRecordBatch;
+import com.google.cloud.bigquery.storage.v1.ArrowSchema;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.Timestamp;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.arrow.memory.BufferAllocator;
+import org.apache.arrow.memory.RootAllocator;
+import org.apache.arrow.vector.FieldVector;
+import org.apache.arrow.vector.VectorLoader;
+import org.apache.arrow.vector.VectorSchemaRoot;
+import org.apache.arrow.vector.ipc.ReadChannel;
+import org.apache.arrow.vector.ipc.message.MessageSerializer;
+import org.apache.arrow.vector.types.pojo.Field;
+import org.apache.arrow.vector.types.pojo.Schema;
+import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel;
+
+/**
+ * Depending on the JDK version, you may need to include this in your VM options: {@code
+ * --add-opens=java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED}
+ *
+ * <p>See the Apache Arrow Java documentation for more information.
+ */
+public class ReadTimestampArrow {
+  /*
+   * SimpleRowReader handles deserialization of the Apache Arrow-encoded row batches transmitted
+   * from the storage API using the Arrow vector loader.
+   */
+  private static class SimpleRowReader implements AutoCloseable {
+
+    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
+
+    // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+    private final VectorSchemaRoot root;
+    private final VectorLoader loader;
+
+    public SimpleRowReader(ArrowSchema arrowSchema) throws IOException {
+      Schema schema =
+          MessageSerializer.deserializeSchema(
+              new ReadChannel(
+                  new ByteArrayReadableSeekableByteChannel(
+                      arrowSchema.getSerializedSchema().toByteArray())));
+      Preconditions.checkNotNull(schema);
+      List<FieldVector> vectors = new ArrayList<>();
+      for (Field field : schema.getFields()) {
+        vectors.add(field.createVector(allocator));
+      }
+      root = new VectorSchemaRoot(vectors);
+      loader = new VectorLoader(root);
+    }
+
+    /**
+     * Sample method for processing Arrow data which only validates decoding.
+     *
+     * @param batch object returned from the ReadRowsResponse.
+     */
+    public void processRows(ArrowRecordBatch batch) throws IOException {
+      org.apache.arrow.vector.ipc.message.ArrowRecordBatch deserializedBatch =
+          MessageSerializer.deserializeRecordBatch(
+              new ReadChannel(
+                  new ByteArrayReadableSeekableByteChannel(
+                      batch.getSerializedRecordBatch().toByteArray())),
+              allocator);
+
+      loader.load(deserializedBatch);
+      // Release buffers from batch (they are still held in the vectors in root).
+      deserializedBatch.close();
+      System.out.println(root.contentToTSVString());
+      // Release buffers from vectors in root.
+      root.clear();
+    }
+
+    @Override
+    public void close() {
+      root.close();
+      allocator.close();
+    }
+  }
+
+  public static void main(String... args) throws Exception {
+    // Sets your Google Cloud Platform project ID.
+    String projectId = args[0];
+    Long snapshotMillis = null;
+    if (args.length > 1) {
+      snapshotMillis = Long.parseLong(args[1]);
+    }
+
+    try (BigQueryReadClient client = BigQueryReadClient.create()) {
+      String parent = String.format("projects/%s", projectId);
+
+      // This example uses citibike data from the public datasets.
+      String srcTable =
+          String.format(
+              "projects/%s/datasets/%s/tables/%s",
+              "bigquery-public-data", "new_york_citibike", "citibike_stations");
+
+      // We specify the columns to be projected by adding them to the selected fields.
+      ReadSession.TableReadOptions options =
+          ReadSession.TableReadOptions.newBuilder().addSelectedFields("last_reported").build();
+
+      // Start specifying the read session we want created.
+      ReadSession.Builder sessionBuilder =
+          ReadSession.newBuilder()
+              .setTable(srcTable)
+              // This API can also deliver data serialized in Apache Avro format.
+              // This example leverages Apache Arrow.
+              .setDataFormat(DataFormat.ARROW)
+              .setReadOptions(options);
+
+      // Optionally specify the snapshot time. When unspecified, snapshot time is "now".
+      if (snapshotMillis != null) {
+        Timestamp t =
+            Timestamp.newBuilder()
+                .setSeconds(snapshotMillis / 1000)
+                .setNanos((int) ((snapshotMillis % 1000) * 1000000))
+                .build();
+        TableModifiers modifiers = TableModifiers.newBuilder().setSnapshotTime(t).build();
+        sessionBuilder.setTableModifiers(modifiers);
+      }
+
+      // Begin building the session creation request.
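+      // Note: setMaxStreamCount(1) below caps the session at a single stream so this sample can
+      // read serially; a parallel reader would request more streams and consume them concurrently.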
+      CreateReadSessionRequest.Builder builder =
+          CreateReadSessionRequest.newBuilder()
+              .setParent(parent)
+              .setReadSession(sessionBuilder)
+              .setMaxStreamCount(1);
+
+      ReadSession session = client.createReadSession(builder.build());
+      // Set up a simple reader and start a read session.
+      try (ReadTimestampArrow.SimpleRowReader reader =
+          new ReadTimestampArrow.SimpleRowReader(session.getArrowSchema())) {
+
+        // Assert that there are streams available in the session. An empty table may not have
+        // data available. If no sessions are available for an anonymous (cached) table, consider
+        // writing results of a query to a named table rather than consuming cached results
+        // directly.
+        Preconditions.checkState(session.getStreamsCount() > 0);
+
+        // Use the first stream to perform reading.
+        String streamName = session.getStreams(0).getName();
+
+        ReadRowsRequest readRowsRequest =
+            ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+        // Process each block of rows as they arrive and decode using our simple row reader.
+        ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+        for (ReadRowsResponse response : stream) {
+          Preconditions.checkState(response.hasArrowRecordBatch());
+          reader.processRows(response.getArrowRecordBatch());
+        }
+      }
+    }
+  }
+}
+// [END bigquerystorage_read_timestamp_arrow]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java
new file mode 100644
index 000000000000..6343c7739fc1
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigquerystorage;
+
+// [START bigquerystorage_read_timestamp_avro]
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigquery.storage.v1.AvroRows;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.Timestamp;
+import java.io.IOException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.BinaryDecoder;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.DecoderFactory;
+
+public class ReadTimestampAvro {
+  /*
+   * SimpleRowReader handles deserialization of the Avro-encoded row blocks transmitted
+   * from the storage API using a generic datum decoder.
+   */
+  private static class SimpleRowReader {
+
+    private final DatumReader<GenericRecord> datumReader;
+
+    // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+    private BinaryDecoder decoder = null;
+
+    // GenericRecord object will be reused.
+    private GenericRecord row = null;
+
+    public SimpleRowReader(Schema schema) {
+      Preconditions.checkNotNull(schema);
+      datumReader = new GenericDatumReader<>(schema);
+    }
+
+    /**
+     * Sample method for processing AVRO rows which only validates decoding.
+     *
+     * @param avroRows object returned from the ReadRowsResponse.
+     */
+    public void processRows(AvroRows avroRows) throws IOException {
+      decoder =
+          DecoderFactory.get()
+              .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), decoder);
+
+      while (!decoder.isEnd()) {
+        // Reusing object row
+        row = datumReader.read(row, decoder);
+        System.out.println(row.toString());
+      }
+    }
+  }
+
+  public static void main(String... args) throws Exception {
+    // Sets your Google Cloud Platform project ID.
+    String projectId = args[0];
+    Long snapshotMillis = null;
+    if (args.length > 1) {
+      snapshotMillis = Long.parseLong(args[1]);
+    }
+
+    try (BigQueryReadClient client = BigQueryReadClient.create()) {
+      String parent = String.format("projects/%s", projectId);
+
+      // This example uses citibike data from the public datasets.
+      String srcTable =
+          String.format(
+              "projects/%s/datasets/%s/tables/%s",
+              "bigquery-public-data", "new_york_citibike", "citibike_stations");
+
+      // We specify the columns to be projected by adding them to the selected fields.
+      ReadSession.TableReadOptions options =
+          ReadSession.TableReadOptions.newBuilder().addSelectedFields("last_reported").build();
+
+      // Start specifying the read session we want created.
+      ReadSession.Builder sessionBuilder =
+          ReadSession.newBuilder()
+              .setTable(srcTable)
+              // This API can also deliver data serialized in Apache Avro format.
+              // This example leverages Apache Avro.
+              .setDataFormat(DataFormat.AVRO)
+              .setReadOptions(options);
+
+      // Optionally specify the snapshot time. When unspecified, snapshot time is "now".
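+      // The snapshot time must be a protobuf Timestamp, so the epoch milliseconds supplied on
+      // the command line are split into whole seconds plus a nanosecond remainder below.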
+      if (snapshotMillis != null) {
+        Timestamp t =
+            Timestamp.newBuilder()
+                .setSeconds(snapshotMillis / 1000)
+                .setNanos((int) ((snapshotMillis % 1000) * 1000000))
+                .build();
+        TableModifiers modifiers = TableModifiers.newBuilder().setSnapshotTime(t).build();
+        sessionBuilder.setTableModifiers(modifiers);
+      }
+
+      // Begin building the session creation request.
+      CreateReadSessionRequest.Builder builder =
+          CreateReadSessionRequest.newBuilder()
+              .setParent(parent)
+              .setReadSession(sessionBuilder)
+              .setMaxStreamCount(1);
+
+      // Request the session creation.
+      ReadSession session = client.createReadSession(builder.build());
+
+      SimpleRowReader reader =
+          new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+      // Assert that there are streams available in the session. An empty table may not have
+      // data available. If no sessions are available for an anonymous (cached) table, consider
+      // writing results of a query to a named table rather than consuming cached results directly.
+      Preconditions.checkState(session.getStreamsCount() > 0);
+
+      // Use the first stream to perform reading.
+      String streamName = session.getStreams(0).getName();
+
+      ReadRowsRequest readRowsRequest =
+          ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+      // Process each block of rows as they arrive and decode using our simple row reader.
+      ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+      for (ReadRowsResponse response : stream) {
+        Preconditions.checkState(response.hasAvroRows());
+        reader.processRows(response.getAvroRows());
+      }
+    }
+  }
+}
+// [END bigquerystorage_read_timestamp_avro]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/StorageArrowSample.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/StorageArrowSample.java
new file mode 100644
index 000000000000..23e1e19a38f6
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/StorageArrowSample.java
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigquerystorage;
+
+// [START bigquerystorage_arrow_quickstart]
+
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigquery.storage.v1.ArrowRecordBatch;
+import com.google.cloud.bigquery.storage.v1.ArrowSchema;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers;
+import com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.Timestamp;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.arrow.memory.BufferAllocator;
+import org.apache.arrow.memory.RootAllocator;
+import org.apache.arrow.vector.FieldVector;
+import org.apache.arrow.vector.VectorLoader;
+import org.apache.arrow.vector.VectorSchemaRoot;
+import org.apache.arrow.vector.ipc.ReadChannel;
+import org.apache.arrow.vector.ipc.message.MessageSerializer;
+import org.apache.arrow.vector.types.pojo.Field;
+import org.apache.arrow.vector.types.pojo.Schema;
+import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel;
+
+public class StorageArrowSample {
+
+  /*
+   * SimpleRowReader handles deserialization of the Apache Arrow-encoded row batches transmitted
+   * from the storage API using the Arrow vector loader.
+   */
+  private static class SimpleRowReader implements AutoCloseable {
+
+    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
+
+    // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+    private final VectorSchemaRoot root;
+    private final VectorLoader loader;
+
+    public SimpleRowReader(ArrowSchema arrowSchema) throws IOException {
+      Schema schema =
+          MessageSerializer.deserializeSchema(
+              new ReadChannel(
+                  new ByteArrayReadableSeekableByteChannel(
+                      arrowSchema.getSerializedSchema().toByteArray())));
+      Preconditions.checkNotNull(schema);
+      List<FieldVector> vectors = new ArrayList<>();
+      for (Field field : schema.getFields()) {
+        vectors.add(field.createVector(allocator));
+      }
+      root = new VectorSchemaRoot(vectors);
+      loader = new VectorLoader(root);
+    }
+
+    /**
+     * Sample method for processing Arrow data which only validates decoding.
+     *
+     * @param batch object returned from the ReadRowsResponse.
+     */
+    public void processRows(ArrowRecordBatch batch) throws IOException {
+      org.apache.arrow.vector.ipc.message.ArrowRecordBatch deserializedBatch =
+          MessageSerializer.deserializeRecordBatch(
+              new ReadChannel(
+                  new ByteArrayReadableSeekableByteChannel(
+                      batch.getSerializedRecordBatch().toByteArray())),
+              allocator);
+
+      loader.load(deserializedBatch);
+      // Release buffers from batch (they are still held in the vectors in root).
+      deserializedBatch.close();
+      System.out.println(root.contentToTSVString());
+      // Release buffers from vectors in root.
+      root.clear();
+    }
+
+    @Override
+    public void close() {
+      root.close();
+      allocator.close();
+    }
+  }
+
+  public static void main(String... args) throws Exception {
+    // Sets your Google Cloud Platform project ID.
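+    // Usage: args[0] is the project ID and args[1] (optional) is a table snapshot time
+    // expressed in milliseconds since the epoch.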
+    // String projectId = "YOUR_PROJECT_ID";
+    String projectId = args[0];
+    Long snapshotMillis = null;
+    if (args.length > 1) {
+      snapshotMillis = Long.parseLong(args[1]);
+    }
+
+    try (BigQueryReadClient client = BigQueryReadClient.create()) {
+      String parent = String.format("projects/%s", projectId);
+
+      // This example uses baby name data from the public datasets.
+      String srcTable =
+          String.format(
+              "projects/%s/datasets/%s/tables/%s",
+              "bigquery-public-data", "usa_names", "usa_1910_current");
+
+      // We specify the columns to be projected by adding them to the selected fields,
+      // and set a simple filter to restrict which rows are transmitted.
+      TableReadOptions options =
+          TableReadOptions.newBuilder()
+              .addSelectedFields("name")
+              .addSelectedFields("number")
+              .addSelectedFields("state")
+              .setRowRestriction("state = \"WA\"")
+              .build();
+
+      // Start specifying the read session we want created.
+      ReadSession.Builder sessionBuilder =
+          ReadSession.newBuilder()
+              .setTable(srcTable)
+              // This API can also deliver data serialized in Apache Avro format.
+              // This example leverages Apache Arrow.
+              .setDataFormat(DataFormat.ARROW)
+              .setReadOptions(options);
+
+      // Optionally specify the snapshot time. When unspecified, snapshot time is "now".
+      if (snapshotMillis != null) {
+        Timestamp t =
+            Timestamp.newBuilder()
+                .setSeconds(snapshotMillis / 1000)
+                .setNanos((int) ((snapshotMillis % 1000) * 1000000))
+                .build();
+        TableModifiers modifiers = TableModifiers.newBuilder().setSnapshotTime(t).build();
+        sessionBuilder.setTableModifiers(modifiers);
+      }
+
+      // Begin building the session creation request.
+      CreateReadSessionRequest.Builder builder =
+          CreateReadSessionRequest.newBuilder()
+              .setParent(parent)
+              .setReadSession(sessionBuilder)
+              .setMaxStreamCount(1);
+
+      ReadSession session = client.createReadSession(builder.build());
+      // Set up a simple reader and start a read session.
+      try (SimpleRowReader reader = new SimpleRowReader(session.getArrowSchema())) {
+
+        // Assert that there are streams available in the session. An empty table may not have
+        // data available. If no sessions are available for an anonymous (cached) table, consider
+        // writing results of a query to a named table rather than consuming cached results
+        // directly.
+        Preconditions.checkState(session.getStreamsCount() > 0);
+
+        // Use the first stream to perform reading.
+        String streamName = session.getStreams(0).getName();
+
+        ReadRowsRequest readRowsRequest =
+            ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+        // Process each block of rows as they arrive and decode using our simple row reader.
+        ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+        for (ReadRowsResponse response : stream) {
+          Preconditions.checkState(response.hasArrowRecordBatch());
+          reader.processRows(response.getArrowRecordBatch());
+        }
+      }
+    }
+  }
+}
+
+// [END bigquerystorage_arrow_quickstart]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/StorageSample.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/StorageSample.java
new file mode 100644
index 000000000000..1bc6f54f90b6
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/StorageSample.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigquerystorage;
+
+// [START bigquerystorage_quickstart]
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigquery.storage.v1.AvroRows;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers;
+import com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.Timestamp;
+import java.io.IOException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.BinaryDecoder;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.DecoderFactory;
+
+public class StorageSample {
+
+  /*
+   * SimpleRowReader handles deserialization of the Avro-encoded row blocks transmitted
+   * from the storage API using a generic datum decoder.
+   */
+  private static class SimpleRowReader {
+
+    private final DatumReader<GenericRecord> datumReader;
+
+    // Decoder object will be reused to avoid re-allocation and too much garbage collection.
+    private BinaryDecoder decoder = null;
+
+    // GenericRecord object will be reused.
+    private GenericRecord row = null;
+
+    public SimpleRowReader(Schema schema) {
+      Preconditions.checkNotNull(schema);
+      datumReader = new GenericDatumReader<>(schema);
+    }
+
+    /**
+     * Sample method for processing AVRO rows which only validates decoding.
+     *
+     * @param avroRows object returned from the ReadRowsResponse.
+     */
+    public void processRows(AvroRows avroRows) throws IOException {
+      decoder =
+          DecoderFactory.get()
+              .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), decoder);
+
+      while (!decoder.isEnd()) {
+        // Reusing object row
+        row = datumReader.read(row, decoder);
+        System.out.println(row.toString());
+      }
+    }
+  }
+
+  public static void main(String... args) throws Exception {
+    // Sets your Google Cloud Platform project ID.
+    // String projectId = "YOUR_PROJECT_ID";
+    String projectId = args[0];
+    Long snapshotMillis = null;
+    if (args.length > 1) {
+      snapshotMillis = Long.parseLong(args[1]);
+    }
+
+    try (BigQueryReadClient client = BigQueryReadClient.create()) {
+      String parent = String.format("projects/%s", projectId);
+
+      // This example uses baby name data from the public datasets.
+      String srcTable =
+          String.format(
+              "projects/%s/datasets/%s/tables/%s",
+              "bigquery-public-data", "usa_names", "usa_1910_current");
+
+      // We specify the columns to be projected by adding them to the selected fields,
+      // and set a simple filter to restrict which rows are transmitted.
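+      // Both the projection and the row restriction are applied server-side, so only the
+      // requested columns for matching rows are ever transmitted to this client.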
+      TableReadOptions options =
+          TableReadOptions.newBuilder()
+              .addSelectedFields("name")
+              .addSelectedFields("number")
+              .addSelectedFields("state")
+              .setRowRestriction("state = \"WA\"")
+              .build();
+
+      // Start specifying the read session we want created.
+      ReadSession.Builder sessionBuilder =
+          ReadSession.newBuilder()
+              .setTable(srcTable)
+              // This API can also deliver data serialized in Apache Avro format.
+              // This example leverages Apache Avro.
+              .setDataFormat(DataFormat.AVRO)
+              .setReadOptions(options);
+
+      // Optionally specify the snapshot time. When unspecified, snapshot time is "now".
+      if (snapshotMillis != null) {
+        Timestamp t =
+            Timestamp.newBuilder()
+                .setSeconds(snapshotMillis / 1000)
+                .setNanos((int) ((snapshotMillis % 1000) * 1000000))
+                .build();
+        TableModifiers modifiers = TableModifiers.newBuilder().setSnapshotTime(t).build();
+        sessionBuilder.setTableModifiers(modifiers);
+      }
+
+      // Begin building the session creation request.
+      CreateReadSessionRequest.Builder builder =
+          CreateReadSessionRequest.newBuilder()
+              .setParent(parent)
+              .setReadSession(sessionBuilder)
+              .setMaxStreamCount(1);
+
+      // Request the session creation.
+      ReadSession session = client.createReadSession(builder.build());
+
+      SimpleRowReader reader =
+          new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+      // Assert that there are streams available in the session. An empty table may not have
+      // data available. If no sessions are available for an anonymous (cached) table, consider
+      // writing results of a query to a named table rather than consuming cached results directly.
+      Preconditions.checkState(session.getStreamsCount() > 0);
+
+      // Use the first stream to perform reading.
+      String streamName = session.getStreams(0).getName();
+
+      ReadRowsRequest readRowsRequest =
+          ReadRowsRequest.newBuilder().setReadStream(streamName).build();
+
+      // Process each block of rows as they arrive and decode using our simple row reader.
+      ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+      for (ReadRowsResponse response : stream) {
+        Preconditions.checkState(response.hasAvroRows());
+        reader.processRows(response.getAvroRows());
+      }
+    }
+  }
+}
+// [END bigquerystorage_quickstart]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteBufferedStream.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteBufferedStream.java
new file mode 100644
index 000000000000..6532deb91b23
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteBufferedStream.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_jsonstreamwriter_buffered] +import com.google.api.core.ApiFuture; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsRequest; +import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.Int64Value; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class WriteBufferedStream { + + public static void runWriteBufferedStream() + throws DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + + writeBufferedStream(projectId, datasetName, tableName); + } + + public static void writeBufferedStream(String projectId, String datasetName, String tableName) + throws DescriptorValidationException, InterruptedException, IOException { + try (BigQueryWriteClient client = BigQueryWriteClient.create()) { + // Initialize a write stream for the specified table. + // For more information on WriteStream.Type, see: + // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/WriteStream.Type.html + WriteStream stream = WriteStream.newBuilder().setType(WriteStream.Type.BUFFERED).build(); + TableName parentTable = TableName.of(projectId, datasetName, tableName); + CreateWriteStreamRequest createWriteStreamRequest = + CreateWriteStreamRequest.newBuilder() + .setParent(parentTable.toString()) + .setWriteStream(stream) + .build(); + WriteStream writeStream = client.createWriteStream(createWriteStreamRequest); + + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + + // Use the JSON stream writer to send records in JSON format. + // For more information about JsonStreamWriter, see: + // https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/com.google.cloud.bigquery.storage.v1.JsonStreamWriter + try (JsonStreamWriter writer = + JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()) + .setRetrySettings(retrySettings) + .build()) { + // Write two batches to the stream, each with 10 JSON records. + for (int i = 0; i < 2; i++) { + JSONArray jsonArr = new JSONArray(); + for (int j = 0; j < 10; j++) { + // Create a JSON object that is compatible with the table schema. 
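+            // Note: this sample assumes the destination table has a single STRING column named
+            // "col1"; adjust the field name to match your own schema.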
+            JSONObject record = new JSONObject();
+            record.put("col1", String.format("buffered-record %03d", i));
+            jsonArr.put(record);
+          }
+          ApiFuture<AppendRowsResponse> future = writer.append(jsonArr);
+          AppendRowsResponse response = future.get();
+        }
+        // Flush the buffer.
+        FlushRowsRequest flushRowsRequest =
+            FlushRowsRequest.newBuilder()
+                .setWriteStream(writeStream.getName())
+                .setOffset(Int64Value.of(10 * 2 - 1)) // Advance the cursor to the latest record.
+                .build();
+        FlushRowsResponse flushRowsResponse = client.flushRows(flushRowsRequest);
+        // You can continue to write to the stream after flushing the buffer.
+      }
+      // Finalize the stream after use.
+      FinalizeWriteStreamRequest finalizeWriteStreamRequest =
+          FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build();
+      client.finalizeWriteStream(finalizeWriteStreamRequest);
+      System.out.println("Appended and committed records successfully.");
+    } catch (ExecutionException e) {
+      // If the wrapped exception is a StatusRuntimeException, check the state of the operation.
+      // If the state is INTERNAL, CANCELLED, or ABORTED, you can retry. For more information, see:
+      // https://grpc.github.io/grpc-java/javadoc/io/grpc/StatusRuntimeException.html
+      System.out.println(e);
+    }
+  }
+}
+// [END bigquerystorage_jsonstreamwriter_buffered]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteCommittedStream.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteCommittedStream.java
new file mode 100644
index 000000000000..d953ebf8adb0
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteCommittedStream.java
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_jsonstreamwriter_committed] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.Exceptions.StorageException; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Phaser; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class WriteCommittedStream { + + public static void runWriteCommittedStream() + throws DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + + writeCommittedStream(projectId, datasetName, tableName); + } + + public static void writeCommittedStream(String projectId, String datasetName, String tableName) + throws DescriptorValidationException, InterruptedException, IOException { + BigQueryWriteClient client = BigQueryWriteClient.create(); + TableName parentTable = TableName.of(projectId, datasetName, tableName); + + DataWriter writer = new DataWriter(); + // One time initialization. + writer.initialize(parentTable, client); + + try { + // Write two batches of fake data to the stream, each with 10 JSON records. Data may be + // batched up to the maximum request size: + // https://cloud.google.com/bigquery/quotas#write-api-limits + long offset = 0; + for (int i = 0; i < 2; i++) { + // Create a JSON object that is compatible with the table schema. + JSONArray jsonArr = new JSONArray(); + for (int j = 0; j < 10; j++) { + JSONObject record = new JSONObject(); + record.put("col1", String.format("batch-record %03d-%03d", i, j)); + jsonArr.put(record); + } + writer.append(jsonArr, offset); + offset += jsonArr.length(); + } + } catch (ExecutionException e) { + // If the wrapped exception is a StatusRuntimeException, check the state of the operation. + // If the state is INTERNAL, CANCELLED, or ABORTED, you can retry. For more information, see: + // https://grpc.github.io/grpc-java/javadoc/io/grpc/StatusRuntimeException.html + System.out.println("Failed to append records. \n" + e); + } + + // Final cleanup for the stream. + writer.cleanup(client); + System.out.println("Appended records successfully."); + } + + // A simple wrapper object showing how the stateful stream writer should be used. + private static class DataWriter { + + private JsonStreamWriter streamWriter; + // Track the number of in-flight requests to wait for all responses before shutting down. 
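+    // The Phaser acts as a reusable latch: append() registers a party per request, the callback
+    // deregisters it on completion, and cleanup() blocks until all parties have arrived.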
+    private final Phaser inflightRequestCount = new Phaser(1);
+
+    private final Object lock = new Object();
+
+    @GuardedBy("lock")
+    private RuntimeException error = null;
+
+    void initialize(TableName parentTable, BigQueryWriteClient client)
+        throws IOException, DescriptorValidationException, InterruptedException {
+      // Initialize a write stream for the specified table.
+      // For more information on WriteStream.Type, see:
+      // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/WriteStream.Type.html
+      WriteStream stream = WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build();
+
+      CreateWriteStreamRequest createWriteStreamRequest =
+          CreateWriteStreamRequest.newBuilder()
+              .setParent(parentTable.toString())
+              .setWriteStream(stream)
+              .build();
+      WriteStream writeStream = client.createWriteStream(createWriteStreamRequest);
+
+      // Configure in-stream automatic retry settings.
+      // Error codes that are immediately retried:
+      // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+      // Error codes that are retried with exponential backoff:
+      // * RESOURCE_EXHAUSTED
+      RetrySettings retrySettings =
+          RetrySettings.newBuilder()
+              .setInitialRetryDelay(Duration.ofMillis(500))
+              .setRetryDelayMultiplier(1.1)
+              .setMaxAttempts(5)
+              .setMaxRetryDelay(Duration.ofMinutes(1))
+              .build();
+
+      // Use the JSON stream writer to send records in JSON format.
+      // For more information about JsonStreamWriter, see:
+      // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html
+      streamWriter =
+          JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema(), client)
+              .setRetrySettings(retrySettings)
+              .build();
+    }
+
+    public void append(JSONArray data, long offset)
+        throws DescriptorValidationException, IOException, ExecutionException {
+      synchronized (this.lock) {
+        // If earlier appends have failed, we need to reset before continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(data, offset);
+      ApiFutures.addCallback(
+          future, new DataWriter.AppendCompleteCallback(this), MoreExecutors.directExecutor());
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup(BigQueryWriteClient client) {
+      // Wait for all in-flight requests to complete.
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+
+      // Finalize the stream.
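+      // Finalizing marks the stream complete so no further appends are accepted; the response
+      // reports the total number of rows the server received on this stream.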
+      FinalizeWriteStreamResponse finalizeResponse =
+          client.finalizeWriteStream(streamWriter.getStreamName());
+      System.out.println("Rows written: " + finalizeResponse.getRowCount());
+    }
+
+    public String getStreamName() {
+      return streamWriter.getStreamName();
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+
+      public AppendCompleteCallback(DataWriter parent) {
+        this.parent = parent;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.format("Append %d success\n", response.getAppendResult().getOffset().getValue());
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        synchronized (this.parent.lock) {
+          if (this.parent.error == null) {
+            StorageException storageException = Exceptions.toStorageException(throwable);
+            this.parent.error =
+                (storageException != null) ? storageException : new RuntimeException(throwable);
+          }
+        }
+        System.out.format("Error: %s\n", throwable.toString());
+        done();
+      }
+
+      private void done() {
+        // Reduce the count of in-flight requests.
+        this.parent.inflightRequestCount.arriveAndDeregister();
+      }
+    }
+  }
+}
+// [END bigquerystorage_jsonstreamwriter_committed]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java
new file mode 100644
index 000000000000..683602da26fe
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigquerystorage;
+
+// [START bigquerystorage_writenestedproto]
+import com.google.api.core.ApiFuture;
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ProtoRows;
+import com.google.cloud.bigquery.storage.v1.ProtoSchemaConverter;
+import com.google.cloud.bigquery.storage.v1.StreamWriter;
+import com.google.protobuf.Descriptors.DescriptorValidationException;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+
+public class WriteNestedProto {
+
+  public static void runWriteNestedProto(String projectId, String datasetName, String tableName)
+      throws DescriptorValidationException, InterruptedException, IOException {
+    StreamWriter streamWriter =
+        StreamWriter.newBuilder(
+                "projects/"
+                    + projectId
+                    + "/datasets/"
+                    + datasetName
+                    + "/tables/"
+                    + tableName
+                    + "/_default")
+            .setWriterSchema(ProtoSchemaConverter.convert(HasNestedMessage.getDescriptor()))
+            .build();
+    ProtoRows protoRows =
+        ProtoRows.newBuilder()
+            .addSerializedRows(
+                HasNestedMessage.newBuilder()
+                    .setFoo("foo")
+                    .setBar(
+                        HasNestedMessage.InnerMessage.newBuilder()
+                            .setMyInt(12345)
+                            .setMyString("bar")
+                            .build())
+                    .build()
+                    .toByteString())
+            .addSerializedRows(
+                HasSeparateNestedMessage.newBuilder()
+                    .setFoo("foo2")
+                    .setBar(
+                        SeparateMessage.newBuilder().setMyInt(123456).setMyString("bar2").build())
+                    .build()
+                    .toByteString())
+            .build();
+    ApiFuture<AppendRowsResponse> future = streamWriter.append(protoRows);
+    try {
+      AppendRowsResponse response = future.get();
+      System.out.println("Appended records successfully.");
+    } catch (ExecutionException e) {
+      System.out.println(e);
+    } finally {
+      // Release the stream writer's resources once the append has completed.
+      streamWriter.close();
+    }
+  }
+}
+// [END bigquerystorage_writenestedproto]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java
new file mode 100644
index 000000000000..c7a4eb2fe9a2
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_jsonstreamwriter_pending] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.Exceptions.StorageException; +import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.StorageError; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Phaser; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class WritePendingStream { + + public static void runWritePendingStream() + throws DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + + writePendingStream(projectId, datasetName, tableName); + } + + public static void writePendingStream(String projectId, String datasetName, String tableName) + throws DescriptorValidationException, InterruptedException, IOException { + BigQueryWriteClient client = BigQueryWriteClient.create(); + TableName parentTable = TableName.of(projectId, datasetName, tableName); + + DataWriter writer = new DataWriter(); + // One time initialization. + writer.initialize(parentTable, client); + + try { + // Write two batches of fake data to the stream, each with 10 JSON records. Data may be + // batched up to the maximum request size: + // https://cloud.google.com/bigquery/quotas#write-api-limits + long offset = 0; + for (int i = 0; i < 2; i++) { + // Create a JSON object that is compatible with the table schema. + JSONArray jsonArr = new JSONArray(); + for (int j = 0; j < 10; j++) { + JSONObject record = new JSONObject(); + record.put("col1", String.format("batch-record %03d-%03d", i, j)); + jsonArr.put(record); + } + writer.append(jsonArr, offset); + offset += jsonArr.length(); + } + } catch (ExecutionException e) { + // If the wrapped exception is a StatusRuntimeException, check the state of the operation. + // If the state is INTERNAL, CANCELLED, or ABORTED, you can retry. For more information, see: + // https://grpc.github.io/grpc-java/javadoc/io/grpc/StatusRuntimeException.html + System.out.println("Failed to append records. \n" + e); + } + + // Final cleanup for the stream. + writer.cleanup(client); + System.out.println("Appended records successfully."); + + // Once all streams are done, if all writes were successful, commit all of them in one request. + // This example only has the one stream. 
If any streams failed, their workload may be
+    // retried on a new stream, and then only the successful stream should be included in the
+    // commit.
+    BatchCommitWriteStreamsRequest commitRequest =
+        BatchCommitWriteStreamsRequest.newBuilder()
+            .setParent(parentTable.toString())
+            .addWriteStreams(writer.getStreamName())
+            .build();
+    BatchCommitWriteStreamsResponse commitResponse = client.batchCommitWriteStreams(commitRequest);
+    // If the response does not have a commit time, it means the commit operation failed.
+    if (!commitResponse.hasCommitTime()) {
+      for (StorageError err : commitResponse.getStreamErrorsList()) {
+        System.out.println(err.getErrorMessage());
+      }
+      throw new RuntimeException("Error committing the streams");
+    }
+    System.out.println("Appended and committed records successfully.");
+  }
+
+  // A simple wrapper object showing how the stateful stream writer should be used.
+  private static class DataWriter {
+
+    private JsonStreamWriter streamWriter;
+    // Track the number of in-flight requests to wait for all responses before shutting down.
+    private final Phaser inflightRequestCount = new Phaser(1);
+
+    private final Object lock = new Object();
+
+    @GuardedBy("lock")
+    private RuntimeException error = null;
+
+    void initialize(TableName parentTable, BigQueryWriteClient client)
+        throws IOException, DescriptorValidationException, InterruptedException {
+      // Initialize a write stream for the specified table.
+      // For more information on WriteStream.Type, see:
+      // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/WriteStream.Type.html
+      WriteStream stream = WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build();
+
+      // Configure in-stream automatic retry settings.
+      // Error codes that are immediately retried:
+      // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+      // Error codes that are retried with exponential backoff:
+      // * RESOURCE_EXHAUSTED
+      RetrySettings retrySettings =
+          RetrySettings.newBuilder()
+              .setInitialRetryDelay(Duration.ofMillis(500))
+              .setRetryDelayMultiplier(1.1)
+              .setMaxAttempts(5)
+              .setMaxRetryDelay(Duration.ofMinutes(1))
+              .build();
+
+      CreateWriteStreamRequest createWriteStreamRequest =
+          CreateWriteStreamRequest.newBuilder()
+              .setParent(parentTable.toString())
+              .setWriteStream(stream)
+              .build();
+      WriteStream writeStream = client.createWriteStream(createWriteStreamRequest);
+
+      // Use the JSON stream writer to send records in JSON format.
+      // For more information about JsonStreamWriter, see:
+      // https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/com.google.cloud.bigquery.storage.v1.JsonStreamWriter
+      streamWriter =
+          JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema())
+              .setRetrySettings(retrySettings)
+              .build();
+    }
+
+    public void append(JSONArray data, long offset)
+        throws DescriptorValidationException, IOException, ExecutionException {
+      synchronized (this.lock) {
+        // If earlier appends have failed, we need to reset before continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(data, offset);
+      ApiFutures.addCallback(
+          future, new AppendCompleteCallback(this), MoreExecutors.directExecutor());
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup(BigQueryWriteClient client) {
+      // Wait for all in-flight requests to complete.
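+      // arriveAndAwaitAdvance() blocks until every append registered above has signaled
+      // completion through arriveAndDeregister() in the callback.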
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+
+      // Finalize the stream.
+      FinalizeWriteStreamResponse finalizeResponse =
+          client.finalizeWriteStream(streamWriter.getStreamName());
+      System.out.println("Rows written: " + finalizeResponse.getRowCount());
+    }
+
+    public String getStreamName() {
+      return streamWriter.getStreamName();
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+
+      public AppendCompleteCallback(DataWriter parent) {
+        this.parent = parent;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.format("Append %d success\n", response.getAppendResult().getOffset().getValue());
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        synchronized (this.parent.lock) {
+          if (this.parent.error == null) {
+            StorageException storageException = Exceptions.toStorageException(throwable);
+            this.parent.error =
+                (storageException != null) ? storageException : new RuntimeException(throwable);
+          }
+        }
+        System.out.format("Error: %s\n", throwable.toString());
+        done();
+      }
+
+      private void done() {
+        // Reduce the count of in-flight requests.
+        this.parent.inflightRequestCount.arriveAndDeregister();
+      }
+    }
+  }
+}
+// [END bigquerystorage_jsonstreamwriter_pending]
diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java
new file mode 100644
index 000000000000..91dcd78b2a3f
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_jsonstreamwriter_default] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.MaximumRequestCallbackWaitTimeExceededException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StorageException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StreamWriterClosedException; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class WriteToDefaultStream { + + public static void runWriteToDefaultStream() + throws DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + writeToDefaultStream(projectId, datasetName, tableName); + } + + private static ByteString buildByteString() { + byte[] bytes = new byte[] {1, 2, 3, 4, 5}; + return ByteString.copyFrom(bytes); + } + + // Create a JSON object that is compatible with the table schema. + private static JSONObject buildRecord(int i, int j) { + JSONObject record = new JSONObject(); + StringBuilder sbSuffix = new StringBuilder(); + for (int k = 0; k < j; k++) { + sbSuffix.append(k); + } + record.put("test_string", String.format("record %03d-%03d %s", i, j, sbSuffix.toString())); + ByteString byteString = buildByteString(); + record.put("test_bytes", byteString); + record.put( + "test_geo", + "POLYGON((-124.49 47.35,-124.49 40.73,-116.49 40.73,-116.49 47.35,-124.49 47.35))"); + return record; + } + + public static void writeToDefaultStream(String projectId, String datasetName, String tableName) + throws DescriptorValidationException, InterruptedException, IOException { + TableName parentTable = TableName.of(projectId, datasetName, tableName); + + DataWriter writer = new DataWriter(); + // One time initialization for the worker. + writer.initialize(parentTable); + + // Write two batches of fake data to the stream, each with 10 JSON records. 
Data may be
+    // batched up to the maximum request size:
+    // https://cloud.google.com/bigquery/quotas#write-api-limits
+    for (int i = 0; i < 2; i++) {
+      JSONArray jsonArr = new JSONArray();
+      for (int j = 0; j < 10; j++) {
+        JSONObject record = buildRecord(i, j);
+        jsonArr.put(record);
+      }
+
+      writer.append(new AppendContext(jsonArr));
+    }
+
+    // Final cleanup for the stream during worker teardown.
+    writer.cleanup();
+    // Only 12 of the 20 appended rows are expected to land: assuming the table's test_string
+    // column is created with a 20-character max length (as in this sample's integration tests),
+    // records whose generated string exceeds that limit fail serialization and are dropped by
+    // the dead-letter handling in the append callback, leaving 6 valid rows per batch of 10.
+    verifyExpectedRowCount(parentTable, 12L);
+    System.out.println("Appended records successfully.");
+  }
+
+  private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount)
+      throws InterruptedException {
+    String queryRowCount =
+        "SELECT COUNT(*) FROM `"
+            + parentTable.getProject()
+            + "."
+            + parentTable.getDataset()
+            + "."
+            + parentTable.getTable()
+            + "`";
+    QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build();
+    BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
+    TableResult results = bigquery.query(queryConfig);
+    long countRowsActual =
+        Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue());
+    if (countRowsActual != expectedRowCount) {
+      throw new RuntimeException(
+          "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual);
+    }
+  }
+
+  private static class AppendContext {
+
+    JSONArray data;
+
+    AppendContext(JSONArray data) {
+      this.data = data;
+    }
+  }
+
+  private static class DataWriter {
+
+    private static final int MAX_RECREATE_COUNT = 3;
+
+    private BigQueryWriteClient client;
+
+    // Track the number of in-flight requests to wait for all responses before shutting down.
+    private final Phaser inflightRequestCount = new Phaser(1);
+    private final Object lock = new Object();
+    private JsonStreamWriter streamWriter;
+
+    @GuardedBy("lock")
+    private RuntimeException error = null;
+
+    private final AtomicInteger recreateCount = new AtomicInteger(0);
+
+    private JsonStreamWriter createStreamWriter(String tableName)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      // Configure in-stream automatic retry settings.
+      // Error codes that are immediately retried:
+      // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED
+      // Error codes that are retried with exponential backoff:
+      // * RESOURCE_EXHAUSTED
+      RetrySettings retrySettings =
+          RetrySettings.newBuilder()
+              .setInitialRetryDelay(Duration.ofMillis(500))
+              .setRetryDelayMultiplier(1.1)
+              .setMaxAttempts(5)
+              .setMaxRetryDelay(Duration.ofMinutes(1))
+              .build();
+
+      // Use the JSON stream writer to send records in JSON format. Specify the table name to write
+      // to the default stream.
+      // For more information about JsonStreamWriter, see:
+      // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html
+      return JsonStreamWriter.newBuilder(tableName, client)
+          .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10)))
+          .setChannelProvider(
+              BigQueryWriteSettings.defaultGrpcTransportProviderBuilder()
+                  .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1))
+                  .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1))
+                  .setKeepAliveWithoutCalls(true)
+                  .setChannelsPerCpu(2)
+                  .build())
+          .setEnableConnectionPool(true)
+          // This allows the connection pool to scale up better.
+          .setFlowControlSettings(
+              FlowControlSettings.newBuilder().setMaxOutstandingElementCount(100L).build())
+          // If a value is missing in the JSON and there is a default value configured on the
+          // BigQuery column, apply the default value to the missing value field.
+          .setDefaultMissingValueInterpretation(
+              AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+          .setRetrySettings(retrySettings)
+          .build();
+    }
+
+    public void initialize(TableName parentTable)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      // Initialize the client without settings; internally the stream writer creates a new
+      // client with full settings.
+      client = BigQueryWriteClient.create();
+
+      streamWriter = createStreamWriter(parentTable.toString());
+    }
+
+    public void append(AppendContext appendContext)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      synchronized (this.lock) {
+        if (!streamWriter.isUserClosed()
+            && streamWriter.isClosed()
+            && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) {
+          streamWriter = createStreamWriter(streamWriter.getStreamName());
+          this.error = null;
+        }
+        // If earlier appends have failed, we need to reset before continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(appendContext.data);
+      ApiFutures.addCallback(
+          future, new AppendCompleteCallback(this, appendContext), MoreExecutors.directExecutor());
+
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup() {
+      // Wait for all in-flight requests to complete.
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      client.close();
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+      private final AppendContext appendContext;
+
+      public AppendCompleteCallback(DataWriter parent, AppendContext appendContext) {
+        this.parent = parent;
+        this.appendContext = appendContext;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.format("Append success\n");
+        this.parent.recreateCount.set(0);
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        if (throwable instanceof AppendSerializationError) {
+          AppendSerializationError ase = (AppendSerializationError) throwable;
+          Map<Integer, String> rowIndexToErrorMessage = ase.getRowIndexToErrorMessage();
+          if (!rowIndexToErrorMessage.isEmpty()) {
+            // Omit the faulty rows
+            JSONArray dataNew = new JSONArray();
+            for (int i = 0; i < appendContext.data.length(); i++) {
+              if (!rowIndexToErrorMessage.containsKey(i)) {
+                dataNew.put(appendContext.data.get(i));
+              } else {
+                // Process faulty rows by placing them on a dead-letter queue, for instance.
+              }
+            }
+
+            // Retry the remaining valid rows, but using a separate thread to
+            // avoid potentially blocking while we are in a callback.
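+            // (Note that as written, the retry below actually runs on the callback thread
+            // itself, since the callback was registered with MoreExecutors.directExecutor();
+            // hand the re-append off to a separate executor if blocking here is a concern.)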
+ if (dataNew.length() > 0) { + try { + this.parent.append(new AppendContext(dataNew)); + } catch (DescriptorValidationException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + } + + boolean resendRequest = false; + if (throwable instanceof MaximumRequestCallbackWaitTimeExceededException) { + resendRequest = true; + } else if (throwable instanceof StreamWriterClosedException) { + if (!parent.streamWriter.isUserClosed()) { + resendRequest = true; + } + } + if (resendRequest) { + // Retry this request. + try { + this.parent.append(new AppendContext(appendContext.data)); + } catch (DescriptorValidationException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + + synchronized (this.parent.lock) { + if (this.parent.error == null) { + StorageException storageException = Exceptions.toStorageException(throwable); + this.parent.error = + (storageException != null) ? storageException : new RuntimeException(throwable); + } + } + done(); + } + + private void done() { + // Reduce the count of in-flight requests. + this.parent.inflightRequestCount.arriveAndDeregister(); + } + } + } +} +// [END bigquerystorage_jsonstreamwriter_default] diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java new file mode 100644 index 000000000000..9bcb32d764c8 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java @@ -0,0 +1,312 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_timestamp_jsonstreamwriter_default] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.time.Instant; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class WriteToDefaultStreamTimestampJson { + + public static void runWriteToDefaultStream() + throws Descriptors.DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + writeToDefaultStream(projectId, datasetName, tableName); + } + + // Create a JSON object that is compatible with the table schema. + private static JSONObject buildRecord() { + JSONObject record = new JSONObject(); + record.put("timestampField", Instant.now().toString()); + return record; + } + + public static void writeToDefaultStream(String projectId, String datasetName, String tableName) + throws Descriptors.DescriptorValidationException, InterruptedException, IOException { + TableName parentTable = TableName.of(projectId, datasetName, tableName); + + DataWriter writer = new DataWriter(); + // One time initialization for the worker. + writer.initialize(parentTable); + + // Write two batches of fake data to the stream, each with 10 JSON records. Data may be + // batched up to the maximum request size: + // https://cloud.google.com/bigquery/quotas#write-api-limits + for (int i = 0; i < 2; i++) { + JSONArray jsonArr = new JSONArray(); + for (int j = 0; j < 10; j++) { + jsonArr.put(buildRecord()); + } + + writer.append(new AppendContext(jsonArr)); + } + + // Final cleanup for the stream during worker teardown. + writer.cleanup(); + verifyExpectedRowCount(parentTable, 20L); + System.out.println("Appended records successfully."); + } + + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) + throws InterruptedException { + String queryRowCount = + "SELECT COUNT(*) FROM `" + + parentTable.getProject() + + "." + + parentTable.getDataset() + + "." 
+ + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService(); + TableResult results = bigquery.query(queryConfig); + long countRowsActual = + Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); + if (countRowsActual != expectedRowCount) { + throw new RuntimeException( + "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual); + } + } + + private static class AppendContext { + JSONArray data; + + AppendContext(JSONArray data) { + this.data = data; + } + } + + private static class DataWriter { + + private static final int MAX_RECREATE_COUNT = 3; + + private BigQueryWriteClient client; + + // Track the number of in-flight requests to wait for all responses before shutting down. + private final Phaser inflightRequestCount = new Phaser(1); + private final Object lock = new Object(); + private JsonStreamWriter streamWriter; + + @GuardedBy("lock") + private RuntimeException error = null; + + private final AtomicInteger recreateCount = new AtomicInteger(0); + + private JsonStreamWriter createStreamWriter(String tableName) + throws Descriptors.DescriptorValidationException, IOException, InterruptedException { + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + + // Use the JSON stream writer to send records in JSON format. Specify the table name to write + // to the default stream. + // For more information about JsonStreamWriter, see: + // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html + return JsonStreamWriter.newBuilder(tableName, client) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) + .setChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveWithoutCalls(true) + .build()) + .setEnableConnectionPool(true) + // This will allow connection pool to scale up better. + .setFlowControlSettings( + FlowControlSettings.newBuilder().setMaxOutstandingElementCount(100L).build()) + // If value is missing in json and there is a default value configured on bigquery + // column, apply the default value to the missing value field. + .setDefaultMissingValueInterpretation( + AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE) + .setRetrySettings(retrySettings) + .build(); + } + + public void initialize(TableName parentTable) + throws Descriptors.DescriptorValidationException, IOException, InterruptedException { + // Initialize client without settings, internally within stream writer a new client will be + // created with full settings. 
+      client = BigQueryWriteClient.create();
+
+      streamWriter = createStreamWriter(parentTable.toString());
+    }
+
+    public void append(AppendContext appendContext)
+        throws Descriptors.DescriptorValidationException, IOException, InterruptedException {
+      synchronized (this.lock) {
+        if (!streamWriter.isUserClosed()
+            && streamWriter.isClosed()
+            && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) {
+          streamWriter = createStreamWriter(streamWriter.getStreamName());
+          this.error = null;
+        }
+        // If earlier appends have failed, we need to reset before continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(appendContext.data);
+      ApiFutures.addCallback(
+          future, new AppendCompleteCallback(this, appendContext), MoreExecutors.directExecutor());
+
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup() {
+      // Wait for all in-flight requests to complete.
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      client.close();
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+      private final AppendContext appendContext;
+
+      public AppendCompleteCallback(DataWriter parent, AppendContext appendContext) {
+        this.parent = parent;
+        this.appendContext = appendContext;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.println("Append success");
+        this.parent.recreateCount.set(0);
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        if (throwable instanceof Exceptions.AppendSerializationError) {
+          Exceptions.AppendSerializationError ase = (Exceptions.AppendSerializationError) throwable;
+          Map<Integer, String> rowIndexToErrorMessage = ase.getRowIndexToErrorMessage();
+          if (!rowIndexToErrorMessage.isEmpty()) {
+            // Omit the faulty rows
+            JSONArray dataNew = new JSONArray();
+            for (int i = 0; i < appendContext.data.length(); i++) {
+              if (!rowIndexToErrorMessage.containsKey(i)) {
+                dataNew.put(appendContext.data.get(i));
+              } else {
+                // process faulty rows by placing them on a dead-letter-queue, for instance
+              }
+            }
+
+            // Retry the remaining valid rows, but using a separate thread to
+            // avoid potentially blocking while we are in a callback.
+            if (!dataNew.isEmpty()) {
+              try {
+                this.parent.append(new AppendContext(dataNew));
+              } catch (DescriptorValidationException | IOException | InterruptedException e) {
+                throw new RuntimeException(e);
+              }
+            }
+            // Mark the existing attempt as done since we got a response for it
+            done();
+            return;
+          }
+        }
+
+        boolean resendRequest = false;
+        if (throwable instanceof Exceptions.MaximumRequestCallbackWaitTimeExceededException) {
+          resendRequest = true;
+        } else if (throwable instanceof Exceptions.StreamWriterClosedException) {
+          if (!parent.streamWriter.isUserClosed()) {
+            resendRequest = true;
+          }
+        }
+        if (resendRequest) {
+          // Retry this request.
+ try { + this.parent.append(new AppendContext(appendContext.data)); + } catch (Descriptors.DescriptorValidationException + | IOException + | InterruptedException e) { + throw new RuntimeException(e); + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + + synchronized (this.parent.lock) { + if (this.parent.error == null) { + Exceptions.StorageException storageException = Exceptions.toStorageException(throwable); + this.parent.error = + (storageException != null) ? storageException : new RuntimeException(throwable); + } + } + done(); + } + + private void done() { + // Reduce the count of in-flight requests. + this.parent.inflightRequestCount.arriveAndDeregister(); + } + } + } +} +// [END bigquerystorage_timestamp_jsonstreamwriter_default] diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java new file mode 100644 index 000000000000..6797aea936db --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java @@ -0,0 +1,367 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_timestamp_streamwriter_default_arrow] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.TimeStampNanoTZVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.VectorUnloader; +import org.apache.arrow.vector.compression.CompressionCodec; +import org.apache.arrow.vector.compression.CompressionUtil; +import org.apache.arrow.vector.compression.NoCompressionCodec; +import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; +import org.apache.arrow.vector.types.TimeUnit; +import org.apache.arrow.vector.types.pojo.ArrowType; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.arrow.vector.types.pojo.Schema; +import org.threeten.bp.Duration; + +/** + * This class demonstrates how to ingest data using Arrow format into BigQuery via the default + * stream. It initiates a DataWriter to establish a connection to BigQuery and reuses this + * connection to continuously ingest data. + * + *

<p>Depending on the JDK version, you may need to include this in your VM options: {@code
+ * --add-opens=java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED}. See the Arrow
+ * documentation for more information.
+ */
+public class WriteToDefaultStreamTimestampWithArrow {
+
+  public static final long NANOS = 1000000000L;
+
+  public static void main(String[] args) throws InterruptedException, IOException {
+    if (args.length < 3) {
+      System.out.println(
+          "Usage: WriteToDefaultStreamTimestampWithArrow <projectId> <datasetName> <tableName>");
+      return;
+    }
+    String projectId = args[0];
+    String datasetName = args[1];
+    String tableName = args[2];
+    // For this sample, the table schema should contain a single field:
+    // ['timestampField': TIMESTAMP]
+    writeToDefaultStreamWithArrow(projectId, datasetName, tableName);
+  }
+
+  private static Schema createArrowSchema() {
+    List<Field> fields =
+        ImmutableList.of(
+            new Field(
+                "timestampField",
+                FieldType.nullable(new ArrowType.Timestamp(TimeUnit.NANOSECOND, "UTC")),
+                null));
+    return new Schema(fields, null);
+  }
+
+  // Create an ArrowRecordBatch object that is compatible with the table schema.
+  private static ArrowRecordBatch buildRecordBatch(VectorSchemaRoot root, int rowCount) {
+    TimeStampNanoTZVector timestampField = (TimeStampNanoTZVector) root.getVector("timestampField");
+    timestampField.allocateNew(rowCount);
+
+    Instant now = Instant.now();
+    for (int i = 0; i < rowCount; i++) {
+      timestampField.set(i, now.getEpochSecond() * NANOS + now.getNano());
+    }
+    root.setRowCount(rowCount);
+
+    CompressionCodec codec =
+        NoCompressionCodec.Factory.INSTANCE.createCodec(CompressionUtil.CodecType.NO_COMPRESSION);
+    VectorUnloader vectorUnloader =
+        new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true);
+    return vectorUnloader.getRecordBatch();
+  }
+
+  public static void writeToDefaultStreamWithArrow(
+      String projectId, String datasetName, String tableName)
+      throws InterruptedException, IOException {
+    TableName parentTable = TableName.of(projectId, datasetName, tableName);
+    Schema arrowSchema = createArrowSchema();
+    DataWriter writer = new DataWriter();
+    // One time initialization for the worker.
+    writer.initialize(parentTable, arrowSchema);
+    long initialRowCount = getRowCount(parentTable);
+    try (BufferAllocator allocator = new RootAllocator()) {
+      // A writer should be used to ingest as much data as possible before teardown.
+      // Append 100 batches.
+      for (int i = 0; i < 100; i++) {
+        try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) {
+          // Each batch has 10 rows.
+          ArrowRecordBatch batch = buildRecordBatch(root, 10);
+
+          // Asynchronous append.
+          writer.append(new ArrowData(arrowSchema, batch));
+        }
+      }
+    }
+    // Final cleanup for the stream during worker teardown.
+    // This blocks until responses for all append requests have been received.
+    writer.cleanup();
+
+    verifyExpectedRowCount(parentTable, initialRowCount + 1000);
+    System.out.println("Appended records successfully.");
+  }
+
+  private static long getRowCount(TableName parentTable) throws InterruptedException {
+    String queryRowCount =
+        "SELECT COUNT(*) FROM `"
+            + parentTable.getProject()
+            + "."
+            + parentTable.getDataset()
+            + "."
+ + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = + BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); + TableResult results = bigquery.query(queryConfig); + return Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); + } + + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) + throws InterruptedException { + String queryRowCount = + "SELECT COUNT(*) FROM `" + + parentTable.getProject() + + "." + + parentTable.getDataset() + + "." + + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = + BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); + TableResult results = bigquery.query(queryConfig); + int countRowsActual = + Integer.parseInt(results.getValues().iterator().next().get("f0_").getStringValue()); + if (countRowsActual != expectedRowCount) { + throw new RuntimeException( + "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual); + } + } + + private static class ArrowData { + Schema arrowSchema; + ArrowRecordBatch data; + + ArrowData(Schema arrowSchema, ArrowRecordBatch data) { + this.arrowSchema = arrowSchema; + this.data = data; + } + } + + private static class DataWriter { + + private static final int MAX_RECREATE_COUNT = 3; + + private BigQueryWriteClient client; + + // Track the number of in-flight requests to wait for all responses before shutting down. + private final Phaser inflightRequestCount = new Phaser(1); + private final Object lock = new Object(); + + private Schema arrowSchema; + private StreamWriter streamWriter; + + @GuardedBy("lock") + private RuntimeException error = null; + + private final AtomicInteger recreateCount = new AtomicInteger(0); + + private StreamWriter createStreamWriter(String streamName, Schema arrowSchema) + throws IOException { + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + + // Use the Stream writer to send records in Arrow format. Specify the table name to write + // to the default stream. + // For more information about StreamWriter, see: + // https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/com.google.cloud.bigquery.storage.v1.StreamWriter + return StreamWriter.newBuilder(streamName, client) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) + .setChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveWithoutCalls(true) + .build()) + .setEnableConnectionPool(true) + // If value is missing in ArrowRecordBatch and there is a default value configured on + // bigquery column, apply the default value to the missing value field. 
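+          // (With DEFAULT_VALUE, the backend substitutes a column's declared default for any
+          // field omitted from the record; columns without a configured default remain NULL.)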
+          .setDefaultMissingValueInterpretation(
+              AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+          .setMaxRetryDuration(java.time.Duration.ofSeconds(5))
+          // Set the StreamWriter with an Arrow schema; this restricts the StreamWriter to
+          // appending data in Arrow format.
+          .setWriterSchema(arrowSchema)
+          .setRetrySettings(retrySettings)
+          .build();
+    }
+
+    public void initialize(TableName parentTable, Schema arrowSchema) throws IOException {
+      // Initialize the client without settings; internally the stream writer creates a new
+      // client with full settings.
+      client = BigQueryWriteClient.create();
+
+      streamWriter = createStreamWriter(parentTable.toString() + "/_default", arrowSchema);
+    }
+
+    public void append(ArrowData arrowData) throws IOException {
+      synchronized (this.lock) {
+        if (!streamWriter.isUserClosed()
+            && streamWriter.isClosed()
+            && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) {
+          streamWriter = createStreamWriter(streamWriter.getStreamName(), arrowData.arrowSchema);
+          this.error = null;
+        }
+        // If earlier appends have failed, we need to reset before continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(arrowData.data);
+      ApiFutures.addCallback(
+          future, new AppendCompleteCallback(this, arrowData), MoreExecutors.directExecutor());
+
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup() {
+      // Wait for all in-flight requests to complete.
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      client.close();
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+      private final ArrowData arrowData;
+
+      public AppendCompleteCallback(DataWriter parent, ArrowData arrowData) {
+        this.parent = parent;
+        this.arrowData = arrowData;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.println("Append success");
+        this.parent.recreateCount.set(0);
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        System.out.println("Append failed: " + throwable.toString());
+        if (throwable instanceof Exceptions.AppendSerializationError) {
+          Exceptions.AppendSerializationError ase = (Exceptions.AppendSerializationError) throwable;
+          Map<Integer, String> rowIndexToErrorMessage = ase.getRowIndexToErrorMessage();
+          if (!rowIndexToErrorMessage.isEmpty()) {
+            System.out.println("row level errors: " + rowIndexToErrorMessage);
+            // The append returned failure with indices for faulty rows.
+            // Fix the faulty rows or remove them from the appended data and retry the append.
+            done();
+            return;
+          }
+        }
+
+        boolean resendRequest = false;
+        if (throwable instanceof Exceptions.MaximumRequestCallbackWaitTimeExceededException) {
+          resendRequest = true;
+        } else if (throwable instanceof Exceptions.StreamWriterClosedException) {
+          if (!parent.streamWriter.isUserClosed()) {
+            resendRequest = true;
+          }
+        }
+        if (resendRequest) {
+          // Retry this request.
+ try { + this.parent.append(new ArrowData(arrowData.arrowSchema, arrowData.data)); + } catch (IOException e) { + throw new RuntimeException(e); + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + + synchronized (this.parent.lock) { + if (this.parent.error == null) { + Exceptions.StorageException storageException = Exceptions.toStorageException(throwable); + this.parent.error = + (storageException != null) ? storageException : new RuntimeException(throwable); + } + } + done(); + } + + private void done() { + // Reduce the count of in-flight requests. + this.parent.inflightRequestCount.arriveAndDeregister(); + } + } + } +} +// [END bigquerystorage_timestamp_streamwriter_default_arrow] diff --git a/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java new file mode 100644 index 000000000000..e893e826d09e --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java @@ -0,0 +1,379 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_streamwriter_default_arrow] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.MaximumRequestCallbackWaitTimeExceededException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StorageException; +import com.google.cloud.bigquery.storage.v1.Exceptions.StreamWriterClosedException; +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.BigIntVector; +import org.apache.arrow.vector.VarCharVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.VectorUnloader; +import org.apache.arrow.vector.compression.CompressionCodec; +import org.apache.arrow.vector.compression.CompressionUtil; +import org.apache.arrow.vector.compression.NoCompressionCodec; +import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; +import org.apache.arrow.vector.types.pojo.ArrowType; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.arrow.vector.types.pojo.Schema; +import org.threeten.bp.Duration; + +/** + * This class demonstrates how to ingest data using Arrow format into BigQuery via the default + * stream. It initiates a DataWriter to establish a connection to BigQuery and reuses this + * connection to continuously ingest data. 
+ */
+public class WriteToDefaultStreamWithArrow {
+  public static void main(String[] args)
+      throws DescriptorValidationException, InterruptedException, IOException {
+    if (args.length < 3) {
+      System.out.println(
+          "Usage: WriteToDefaultStreamWithArrow <projectId> <datasetName> <tableName>");
+      return;
+    }
+    String projectId = args[0];
+    String datasetName = args[1];
+    String tableName = args[2];
+    // Table schema should contain 3 fields:
+    // ['test_string': STRING, 'test_int': INTEGER, 'test_geo': GEOGRAPHY]
+    writeToDefaultStreamWithArrow(projectId, datasetName, tableName);
+  }
+
+  private static Schema createArrowSchema() {
+    List<Field> fields =
+        ImmutableList.of(
+            new Field("test_string", FieldType.nullable(new ArrowType.Utf8()), null),
+            new Field("test_int", FieldType.nullable(new ArrowType.Int(64, true)), null),
+            new Field("test_geo", FieldType.nullable(new ArrowType.Utf8()), null));
+    return new Schema(fields, null);
+  }
+
+  // Create an ArrowRecordBatch object that is compatible with the table schema.
+  private static ArrowRecordBatch buildRecordBatch(VectorSchemaRoot root, int rowCount) {
+    VarCharVector testString = (VarCharVector) root.getVector("test_string");
+    BigIntVector testInt = (BigIntVector) root.getVector("test_int");
+    VarCharVector testGeo = (VarCharVector) root.getVector("test_geo");
+
+    testString.allocateNew(rowCount);
+    testInt.allocateNew(rowCount);
+    testGeo.allocateNew(rowCount);
+
+    for (int i = 0; i < rowCount; i++) {
+      testString.set(i, ("A" + i).getBytes());
+      testInt.set(i, i + 100);
+      testGeo.set(
+          i,
+          "POLYGON((-124.49 47.35,-124.49 40.73,-116.49 40.73,-113.49 47.35,-124.49 47.35))"
+              .getBytes());
+    }
+    root.setRowCount(rowCount);
+
+    CompressionCodec codec =
+        NoCompressionCodec.Factory.INSTANCE.createCodec(CompressionUtil.CodecType.NO_COMPRESSION);
+    VectorUnloader vectorUnloader =
+        new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true);
+    return vectorUnloader.getRecordBatch();
+  }
+
+  public static void writeToDefaultStreamWithArrow(
+      String projectId, String datasetName, String tableName)
+      throws DescriptorValidationException, InterruptedException, IOException {
+    TableName parentTable = TableName.of(projectId, datasetName, tableName);
+    Schema arrowSchema = createArrowSchema();
+    DataWriter writer = new DataWriter();
+    // One time initialization for the worker.
+    writer.initialize(parentTable, arrowSchema);
+    long initialRowCount = getRowCount(parentTable);
+    try (BufferAllocator allocator = new RootAllocator()) {
+      // A writer should be used to ingest as much data as possible before teardown.
+      // Append 100 batches.
+      for (int i = 0; i < 100; i++) {
+        try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) {
+          // Each batch has 10 rows.
+          ArrowRecordBatch batch = buildRecordBatch(root, 10);
+
+          // Asynchronous append.
+          writer.append(new ArrowData(arrowSchema, batch));
+        }
+      }
+    }
+    // Final cleanup for the stream during worker teardown.
+    // This blocks until responses for all append requests have been received.
+    writer.cleanup();
+
+    verifyExpectedRowCount(parentTable, initialRowCount + 1000);
+    System.out.println("Appended records successfully.");
+  }
+
+  private static long getRowCount(TableName parentTable) throws InterruptedException {
+    String queryRowCount =
+        "SELECT COUNT(*) FROM `"
+            + parentTable.getProject()
+            + "."
+            + parentTable.getDataset()
+            + "."
+ + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = + BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); + TableResult results = bigquery.query(queryConfig); + return Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); + } + + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) + throws InterruptedException { + String queryRowCount = + "SELECT COUNT(*) FROM `" + + parentTable.getProject() + + "." + + parentTable.getDataset() + + "." + + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = + BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); + TableResult results = bigquery.query(queryConfig); + long countRowsActual = + Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); + if (countRowsActual != expectedRowCount) { + throw new RuntimeException( + "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual); + } + } + + private static class ArrowData { + Schema arrowSchema; + ArrowRecordBatch data; + + ArrowData(Schema arrowSchema, ArrowRecordBatch data) { + this.arrowSchema = arrowSchema; + this.data = data; + } + } + + private static class DataWriter { + + private static final int MAX_RECREATE_COUNT = 3; + + private BigQueryWriteClient client; + + // Track the number of in-flight requests to wait for all responses before shutting down. + private final Phaser inflightRequestCount = new Phaser(1); + private final Object lock = new Object(); + + private Schema arrowSchema; + private StreamWriter streamWriter; + + @GuardedBy("lock") + private RuntimeException error = null; + + private final AtomicInteger recreateCount = new AtomicInteger(0); + + private StreamWriter createStreamWriter(String streamName, Schema arrowSchema) + throws IOException { + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + + // Use the Stream writer to send records in Arrow format. Specify the table name to write + // to the default stream. + // For more information about StreamWriter, see: + // https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/com.google.cloud.bigquery.storage.v1.StreamWriter + return StreamWriter.newBuilder(streamName, client) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) + .setChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveWithoutCalls(true) + .setChannelsPerCpu(2) + .build()) + .setEnableConnectionPool(true) + // If value is missing in ArrowRecordBatch and there is a default value configured on + // bigquery column, apply the default value to the missing value field. 
+          .setDefaultMissingValueInterpretation(
+              AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
+          .setMaxRetryDuration(java.time.Duration.ofSeconds(5))
+          // Set the StreamWriter with an Arrow schema; this restricts the StreamWriter to
+          // appending data in Arrow format.
+          .setWriterSchema(arrowSchema)
+          .setRetrySettings(retrySettings)
+          .build();
+    }
+
+    public void initialize(TableName parentTable, Schema arrowSchema)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      // Initialize the client without settings; internally the stream writer creates a new
+      // client with full settings.
+      client = BigQueryWriteClient.create();
+
+      streamWriter = createStreamWriter(parentTable.toString() + "/_default", arrowSchema);
+    }
+
+    public void append(ArrowData arrowData)
+        throws DescriptorValidationException, IOException, InterruptedException {
+      synchronized (this.lock) {
+        if (!streamWriter.isUserClosed()
+            && streamWriter.isClosed()
+            && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) {
+          streamWriter = createStreamWriter(streamWriter.getStreamName(), arrowData.arrowSchema);
+          this.error = null;
+        }
+        // If earlier appends have failed, we need to reset before continuing.
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+      // Append asynchronously for increased throughput.
+      ApiFuture<AppendRowsResponse> future = streamWriter.append(arrowData.data);
+      ApiFutures.addCallback(
+          future, new AppendCompleteCallback(this, arrowData), MoreExecutors.directExecutor());
+
+      // Increase the count of in-flight requests.
+      inflightRequestCount.register();
+    }
+
+    public void cleanup() {
+      // Wait for all in-flight requests to complete.
+      inflightRequestCount.arriveAndAwaitAdvance();
+
+      client.close();
+      // Close the connection to the server.
+      streamWriter.close();
+
+      // Verify that no error occurred in the stream.
+      synchronized (this.lock) {
+        if (this.error != null) {
+          throw this.error;
+        }
+      }
+    }
+
+    static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+
+      private final DataWriter parent;
+      private final ArrowData arrowData;
+
+      public AppendCompleteCallback(DataWriter parent, ArrowData arrowData) {
+        this.parent = parent;
+        this.arrowData = arrowData;
+      }
+
+      public void onSuccess(AppendRowsResponse response) {
+        System.out.format("Append success\n");
+        this.parent.recreateCount.set(0);
+        done();
+      }
+
+      public void onFailure(Throwable throwable) {
+        System.out.format("Append failed: %s\n", throwable.toString());
+        if (throwable instanceof AppendSerializationError) {
+          AppendSerializationError ase = (AppendSerializationError) throwable;
+          Map<Integer, String> rowIndexToErrorMessage = ase.getRowIndexToErrorMessage();
+          if (!rowIndexToErrorMessage.isEmpty()) {
+            System.out.format("row level errors: %s\n", rowIndexToErrorMessage);
+            // The append returned failure with indices for faulty rows.
+            // Fix the faulty rows or remove them from the appended data and retry the append.
+            done();
+            return;
+          }
+        }
+
+        boolean resendRequest = false;
+        if (throwable instanceof MaximumRequestCallbackWaitTimeExceededException) {
+          resendRequest = true;
+        } else if (throwable instanceof StreamWriterClosedException) {
+          if (!parent.streamWriter.isUserClosed()) {
+            resendRequest = true;
+          }
+        }
+        if (resendRequest) {
+          // Retry this request.
+ try { + this.parent.append(new ArrowData(arrowData.arrowSchema, arrowData.data)); + } catch (DescriptorValidationException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + + synchronized (this.parent.lock) { + if (this.parent.error == null) { + StorageException storageException = Exceptions.toStorageException(throwable); + this.parent.error = + (storageException != null) ? storageException : new RuntimeException(throwable); + } + } + done(); + } + + private void done() { + // Reduce the count of in-flight requests. + this.parent.inflightRequestCount.arriveAndDeregister(); + } + } + } +} +// [END bigquerystorage_streamwriter_default_arrow] diff --git a/java-bigquerystorage/samples/snippets/src/main/proto/nested.proto b/java-bigquerystorage/samples/snippets/src/main/proto/nested.proto new file mode 100644 index 000000000000..63db8bd11dbf --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/main/proto/nested.proto @@ -0,0 +1,40 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; +package nestedprotos; + +import "separate.proto"; + +option java_multiple_files = true; +option java_package = "com.example.bigquerystorage"; +option java_outer_classname = "NestedProtos"; + +message HasNestedMessage { + optional string foo = 1; + + message InnerMessage { + optional int64 my_int = 1; + optional string my_string = 2; + } + + optional InnerMessage bar = 2; +} + +message HasSeparateNestedMessage { + optional string foo = 1; + optional SeparateMessage bar = 2; +} diff --git a/java-bigquerystorage/samples/snippets/src/main/proto/separate.proto b/java-bigquerystorage/samples/snippets/src/main/proto/separate.proto new file mode 100644 index 000000000000..8aca5219ce49 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/main/proto/separate.proto @@ -0,0 +1,27 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +syntax = "proto3"; +package nestedprotos; + +option java_multiple_files = true; +option java_package = "com.example.bigquerystorage"; +option java_outer_classname = "SeparateProtos"; + +message SeparateMessage { + optional int64 my_int = 1; + optional string my_string = 2; +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ExportOpenTelemetryIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ExportOpenTelemetryIT.java new file mode 100644 index 000000000000..78b8409c340f --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ExportOpenTelemetryIT.java @@ -0,0 +1,125 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExportOpenTelemetryIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static SdkMeterProvider METER_PROVIDER = null; + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + private void setupGoogleCloudMonitoringOT() { + MetricExporter metricExporter = GoogleCloudMetricExporter.createWithDefaultConfiguration(); + METER_PROVIDER = + SdkMeterProvider.builder() + .registerMetricReader( + PeriodicMetricReader.builder(metricExporter) + .setInterval(java.time.Duration.ofMillis(30000)) + .build()) + .build(); + 
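+    // buildAndRegisterGlobal() installs this SDK as the process-wide GlobalOpenTelemetry
+    // instance, so libraries that read the global instance (such as the write client, when
+    // OpenTelemetry is enabled) can pick up this meter provider.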
OpenTelemetrySdk.builder().setMeterProvider(METER_PROVIDER).buildAndRegisterGlobal(); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + setupGoogleCloudMonitoringOT(); + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. + datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = + Schema.of( + com.google.cloud.bigquery.Field.newBuilder("test_string", StandardSQLTypeName.STRING) + .setMaxLength(20L) + .build(), + com.google.cloud.bigquery.Field.newBuilder("test_bytes", StandardSQLTypeName.BYTES) + .build(), + com.google.cloud.bigquery.Field.newBuilder("test_geo", StandardSQLTypeName.GEOGRAPHY) + .build()); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + if (METER_PROVIDER != null) { + METER_PROVIDER.shutdown(); + } + } + + @Test + public void testExportOpenTelemetry() throws Exception { + ExportOpenTelemetry.exportToOpenTelemetry(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/JsonWriterStreamCdcIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/JsonWriterStreamCdcIT.java new file mode 100644 index 000000000000..87c26e4c2711 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/JsonWriterStreamCdcIT.java @@ -0,0 +1,89 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.bigquerystorage;
+
+import static com.google.common.truth.Truth.assertThat;
+import static junit.framework.TestCase.assertNotNull;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.DatasetId;
+import com.google.cloud.bigquery.DatasetInfo;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.util.UUID;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class JsonWriterStreamCdcIT {
+
+  private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT");
+
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private BigQuery bigquery;
+  private String datasetName;
+
+  @BeforeClass
+  public static void beforeClass() {
+    // Fail fast when the required project variable is absent, matching the other ITs.
+    assertNotNull(
+        "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.",
+        System.getenv("GOOGLE_CLOUD_PROJECT"));
+  }
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    System.setOut(out);
+
+    bigquery = BigQueryOptions.getDefaultInstance().getService();
+
+    // Create a new dataset for each test.
+    datasetName = "JAVA_WRITER_STREAM_CDC_TEST" + UUID.randomUUID().toString().substring(0, 8);
+    bigquery.create(DatasetInfo.newBuilder(datasetName).build());
+  }
+
+  @Test
+  public void testJsonWriterStreamCdc() throws Exception {
+    Path newCustomersDataFilePath =
+        FileSystems.getDefault().getPath("../snippets/src/test/resources", "NewCustomers.json");
+    Path modifiedCustomersDataFilePath =
+        FileSystems.getDefault()
+            .getPath("../snippets/src/test/resources", "ModifiedCustomers.json");
+    String[] args = {
+      GOOGLE_CLOUD_PROJECT,
+      datasetName,
+      "customers",
+      newCustomersDataFilePath.toAbsolutePath().toString(),
+      modifiedCustomersDataFilePath.toAbsolutePath().toString()
+    };
+
+    JsonWriterStreamCdc.main(args);
+    assertThat(bout.toString()).contains("Wrote batch");
+  }
+
+  @After
+  public void tearDown() {
+    bigquery.delete(
+        DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents());
+    System.setOut(null);
+  }
+}
diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ParallelWriteCommittedStreamIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ParallelWriteCommittedStreamIT.java
new file mode 100644
index 000000000000..0575f295d06a
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ParallelWriteCommittedStreamIT.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ParallelWriteCommittedStreamIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. + datasetName = "PARALLEL_WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "PARALLEL_WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = Schema.of(Field.of("col1", StandardSQLTypeName.STRING)); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + bigquery.delete(TableId.of(GOOGLE_CLOUD_PROJECT, datasetName, tableName)); + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + } + + @Test + public void testParallelWriteCommittedStream() throws Exception { + ParallelWriteCommittedStream.writeCommittedStream(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("All records are appended successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/QuickstartArrowSampleIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/QuickstartArrowSampleIT.java new file mode 100644 index 000000000000..9738efdbf3fe --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/QuickstartArrowSampleIT.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for quickstart sample. */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class QuickstartArrowSampleIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + } + + @Test + public void testQuickstart() throws Exception { + StorageArrowSample.main(PROJECT_ID); + String got = bout.toString(); + // Ensure at least 1k of output generated and a specific token was present in the output. + assertThat(bout.size()).isGreaterThan(1024); + assertThat(got).contains("Zayvion"); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/QuickstartSampleIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/QuickstartSampleIT.java new file mode 100644 index 000000000000..9e4d209b88d2 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/QuickstartSampleIT.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for quickstart sample. 
*/ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class QuickstartSampleIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + } + + @Test + public void testQuickstart() throws Exception { + StorageSample.main(PROJECT_ID); + String got = bout.toString(); + // Ensure at least 1k of output generated and a specific token was present in the output. + assertThat(bout.size()).isGreaterThan(1024); + assertThat(got).contains("Zayvion"); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampArrowIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampArrowIT.java new file mode 100644 index 000000000000..ce340fa2b60f --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampArrowIT.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadTimestampArrowIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + } + + @Test + public void testQuickstart() throws Exception { + ReadTimestampArrow.main(PROJECT_ID); + String got = bout.toString(); + // Ensure that `last_reported` column is in the output + assertThat(got).contains("last_reported"); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampAvroIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampAvroIT.java new file mode 100644 index 000000000000..c1b6bb4d882b --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampAvroIT.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadTimestampAvroIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + } + + @Test + public void testReadTimestampAvro() throws Exception { + ReadTimestampAvro.main(PROJECT_ID); + String got = bout.toString(); + // Ensure that `last_reported` column is in the output + assertThat(got).contains("last_reported"); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteBufferedStreamIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteBufferedStreamIT.java new file mode 100644 index 000000000000..684c70dba456 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteBufferedStreamIT.java @@ -0,0 +1,105 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import com.google.cloud.bigquery.TableResult; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteBufferedStreamIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. + datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "PENDING_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = Schema.of(Field.of("col1", StandardSQLTypeName.STRING)); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteBufferedStream() throws Exception { + WriteBufferedStream.writeBufferedStream(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended and committed records successfully."); + + // Verify that the records are visible in the table. 
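+    // Rows written over a buffered stream only become visible once the sample has
+    // flushed them, so the row count below is expected to cover every appended record.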
+ String query = "SELECT * FROM " + tableName; + QueryJobConfiguration queryConfig = + QueryJobConfiguration.newBuilder(query).setDefaultDataset(datasetName).build(); + TableResult result = bigquery.query(queryConfig); + assertThat(result.getTotalRows()).isEqualTo(20); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteCommittedStreamIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteCommittedStreamIT.java new file mode 100644 index 000000000000..992201e9dc26 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteCommittedStreamIT.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteCommittedStreamIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. 
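+    // Random suffixes keep datasets and tables from colliding when tests run concurrently.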
+ datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "COMMITTED_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = Schema.of(Field.of("col1", StandardSQLTypeName.STRING)); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteCommittedStream() throws Exception { + WriteCommittedStream.writeCommittedStream(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteNestedProtoIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteNestedProtoIT.java new file mode 100644 index 000000000000..73924f73608e --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteNestedProtoIT.java @@ -0,0 +1,109 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.FieldList; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteNestedProtoIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. 
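+    // The STRUCT column below mirrors InnerMessage (my_int, my_string) from
+    // nested.proto, so the nested proto payload maps directly onto the table schema.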
+ datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = + Schema.of( + com.google.cloud.bigquery.Field.newBuilder("foo", StandardSQLTypeName.STRING).build(), + com.google.cloud.bigquery.Field.newBuilder( + "bar", + StandardSQLTypeName.STRUCT, + FieldList.of( + com.google.cloud.bigquery.Field.newBuilder( + "my_int", StandardSQLTypeName.INT64) + .build(), + com.google.cloud.bigquery.Field.newBuilder( + "my_string", StandardSQLTypeName.STRING) + .build())) + .build()); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteNestedProto() throws Exception { + WriteNestedProto.runWriteNestedProto(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WritePendingStreamIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WritePendingStreamIT.java new file mode 100644 index 000000000000..a21d5258c1d0 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WritePendingStreamIT.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WritePendingStreamIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. + datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "PENDING_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = Schema.of(Field.of("col1", StandardSQLTypeName.STRING)); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWritePendingStream() throws Exception { + WritePendingStream.writePendingStream(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended and committed records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamArrowIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamArrowIT.java new file mode 100644 index 000000000000..07ebf1338a90 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamArrowIT.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteToDefaultStreamArrowIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. 
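+    // A length-constrained STRING, an INT64, and a GEOGRAPHY column exercise several
+    // Arrow-to-BigQuery type conversions in one table.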
+ datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = + Schema.of( + com.google.cloud.bigquery.Field.newBuilder("test_string", StandardSQLTypeName.STRING) + .setMaxLength(20L) + .build(), + com.google.cloud.bigquery.Field.newBuilder("test_int", StandardSQLTypeName.INT64) + .build(), + com.google.cloud.bigquery.Field.newBuilder("test_geo", StandardSQLTypeName.GEOGRAPHY) + .build()); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteToDefaultStream() throws Exception { + WriteToDefaultStreamWithArrow.writeToDefaultStreamWithArrow( + GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamIT.java new file mode 100644 index 000000000000..18b6d0096e01 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamIT.java @@ -0,0 +1,103 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQuery.DatasetDeleteOption; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteToDefaultStreamIT { + + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar(String varName) { + assertNotNull( + "Environment variable " + varName + " is required to perform these tests.", + System.getenv(varName)); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. + datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = + Schema.of( + com.google.cloud.bigquery.Field.newBuilder("test_string", StandardSQLTypeName.STRING) + .setMaxLength(20L) + .build(), + com.google.cloud.bigquery.Field.newBuilder("test_bytes", StandardSQLTypeName.BYTES) + .build(), + com.google.cloud.bigquery.Field.newBuilder("test_geo", StandardSQLTypeName.GEOGRAPHY) + .build()); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteToDefaultStream() throws Exception { + WriteToDefaultStream.writeToDefaultStream(GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJsonIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJsonIT.java new file mode 100644 index 000000000000..04df2b2dd27f --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJsonIT.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigquerystorage;
+
+import static com.google.common.truth.Truth.assertThat;
+import static junit.framework.TestCase.assertNotNull;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.DatasetId;
+import com.google.cloud.bigquery.DatasetInfo;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.StandardTableDefinition;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.UUID;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class WriteToDefaultStreamTimestampJsonIT {
+  private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT");
+
+  private ByteArrayOutputStream bout;
+  private BigQuery bigquery;
+  private String datasetName;
+  private String tableName;
+
+  private static void requireEnvVar() {
+    assertNotNull(
+        "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.",
+        System.getenv("GOOGLE_CLOUD_PROJECT"));
+  }
+
+  @BeforeClass
+  public static void checkRequirements() {
+    requireEnvVar();
+  }
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    PrintStream out = new PrintStream(bout);
+    System.setOut(out);
+
+    bigquery = BigQueryOptions.getDefaultInstance().getService();
+
+    // Create a new dataset and table for each test.
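+    // A single TIMESTAMP column is all this sample needs to demonstrate timestamp
+    // conversion through the JSON writer.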
+ datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = + Schema.of(Field.newBuilder("timestampField", StandardSQLTypeName.TIMESTAMP).build()); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), + BigQuery.DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteToDefaultStream() throws Exception { + WriteToDefaultStreamTimestampJson.writeToDefaultStream( + GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrowIT.java b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrowIT.java new file mode 100644 index 000000000000..4cffd836a017 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrowIT.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.bigquerystorage;
+
+import static com.google.common.truth.Truth.assertThat;
+import static junit.framework.TestCase.assertNotNull;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.DatasetId;
+import com.google.cloud.bigquery.DatasetInfo;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.StandardTableDefinition;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.UUID;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class WriteToDefaultStreamTimestampWithArrowIT {
+  private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT");
+
+  private ByteArrayOutputStream bout;
+  private BigQuery bigquery;
+  private String datasetName;
+  private String tableName;
+
+  private static void requireEnvVar() {
+    assertNotNull(
+        "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.",
+        System.getenv("GOOGLE_CLOUD_PROJECT"));
+  }
+
+  @BeforeClass
+  public static void checkRequirements() {
+    requireEnvVar();
+  }
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    PrintStream out = new PrintStream(bout);
+    System.setOut(out);
+
+    bigquery = BigQueryOptions.getDefaultInstance().getService();
+
+    // Create a new dataset and table for each test.
+    datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8);
+    tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8);
+    Schema schema =
+        Schema.of(Field.newBuilder("timestampField", StandardSQLTypeName.TIMESTAMP).build());
+    bigquery.create(DatasetInfo.newBuilder(datasetName).build());
+    TableInfo tableInfo =
+        TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema))
+            .build();
+    bigquery.create(tableInfo);
+  }
+
+  @After
+  public void tearDown() {
+    bigquery.delete(
+        DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName),
+        BigQuery.DatasetDeleteOption.deleteContents());
+    System.setOut(null);
+  }
+
+  @Test
+  public void testWriteToDefaultStream() throws Exception {
+    // Exercises the Arrow-based timestamp sample; the class and method names are
+    // assumed to mirror the JSON variant of this sample.
+    WriteToDefaultStreamTimestampWithArrow.writeToDefaultStream(
+        GOOGLE_CLOUD_PROJECT, datasetName, tableName);
+    assertThat(bout.toString()).contains("Appended records successfully.");
+  }
+}
diff --git a/java-bigquerystorage/samples/snippets/src/test/resources/ModifiedCustomers.json b/java-bigquerystorage/samples/snippets/src/test/resources/ModifiedCustomers.json
new file mode 100644
index 000000000000..a41554548305
--- /dev/null
+++ b/java-bigquerystorage/samples/snippets/src/test/resources/ModifiedCustomers.json
@@ -0,0 +1,5 @@
+{"Customer_ID":1,"Customer_Enrollment_Date":19301,"Customer_Name":"Nick_2.0","Customer_Address":"1600AmphitheatrePkwy,MountainView,CA","Customer_Tier":"Platinum","Active_Subscriptions":"{\"Internet_Subscription\":\"Paid\",\"Music_Subscription\":\"Paid\"}","_CHANGE_TYPE":"UPSERT"}
+{"Customer_ID":2,"Customer_Enrollment_Date":19318,"Customer_Name":"Heather","Customer_Address":"285FultonSt,NewYork,NY","Customer_Tier":"Commercial","Active_Subscriptions":"{\"TV_Subscription\":\"Free\"}","_CHANGE_TYPE":"UPSERT"}
+{"Customer_ID":7,"_CHANGE_TYPE":"DELETE"} +{"Customer_ID":8,"_CHANGE_TYPE":"DELETE"} +{"Customer_ID":10,"Customer_Enrollment_Date":19410,"Customer_Name":"Melody","Customer_Address":"345SpearSt,SanFrancisco,CA","Customer_Tier":"Commercial","Active_Subscriptions":"{\"Music_Subscription\":\"Free\"}","_CHANGE_TYPE":"UPSERT"} \ No newline at end of file diff --git a/java-bigquerystorage/samples/snippets/src/test/resources/NewCustomers.json b/java-bigquerystorage/samples/snippets/src/test/resources/NewCustomers.json new file mode 100644 index 000000000000..50bfe34cd3c4 --- /dev/null +++ b/java-bigquerystorage/samples/snippets/src/test/resources/NewCustomers.json @@ -0,0 +1,9 @@ +{"Customer_ID":1,"Customer_Enrollment_Date":19301,"Customer_Name":"Nick","Customer_Address":"1600AmphitheatrePkwy,MountainView,CA","Customer_Tier":"Commercial","Active_Subscriptions":"{\"Internet_Subscription\":\"Trial\",\"Music_Subscription\":\"Free\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":2,"Customer_Enrollment_Date":19318,"Customer_Name":"Heather","Customer_Address":"350FifthAvenue,NewYork,NY","Customer_Tier":"Commercial","Active_Subscriptions":"{}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":3,"Customer_Enrollment_Date":19250,"Customer_Name":"Lyle","Customer_Address":"10DowningStreet,London,England","Customer_Tier":"Enterprise","Active_Subscriptions":"{\"Internet_Subscription\":\"Paid\",\"Music_Subscription\":\"Paid\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":4,"Customer_Enrollment_Date":19140,"Customer_Name":"Heidi","Customer_Address":"4059MtLeeDr.,Hollywood,CA","Customer_Tier":"Commercial","Active_Subscriptions":"{\"TV_Subscription\":\"Free\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":5,"Customer_Enrollment_Date":19299,"Customer_Name":"Paul","Customer_Address":"221BBakerSt,London,England","Customer_Tier":"Commercial","Active_Subscriptions":"{\"Music_Subscription\":\"Free\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":6,"Customer_Enrollment_Date":19329,"Customer_Name":"Dylan","Customer_Address":"1DrCarltonBGoodlettPl,SanFrancisco,CA","Customer_Tier":"Commercial","Active_Subscriptions":"{\"TV_Subscription\":\"Trial\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":7,"Customer_Enrollment_Date":19400,"Customer_Name":"Monica","Customer_Address":"PiazzadelColosseo,1,00184RomaRM,Italy","Customer_Tier":"Commercial","Active_Subscriptions":"{\"Internet_Subscription\":\"Paid\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":8,"Customer_Enrollment_Date":19377,"Customer_Name":"Katie","Customer_Address":"11WallStreet,NewYork,NY","Customer_Tier":"Enterprise","Active_Subscriptions":"{\"Music_Subscription\":\"Paid\"}","_CHANGE_TYPE":"UPSERT"} +{"Customer_ID":9,"Customer_Enrollment_Date":19410,"Customer_Name":"Jeremy","Customer_Address":"1600PennsylvaniaAvenue,WashingtonDC","Customer_Tier":"Enterprise","Active_Subscriptions":"{\"Internet_Subscription\":\"Paid\",\"TV_Subscription\":\"Paid\",\"Music_Subscription\":\"Trial\"}","_CHANGE_TYPE":"UPSERT"} \ No newline at end of file diff --git a/pom.xml b/pom.xml index 96bd03560242..b0182a732d6e 100644 --- a/pom.xml +++ b/pom.xml @@ -51,6 +51,7 @@ java-bigquerydatatransfer java-bigquerymigration java-bigqueryreservation + java-bigquerystorage java-billing java-billingbudgets java-binary-authorization diff --git a/versions.txt b/versions.txt index b402889bf022..94088c593692 100644 --- a/versions.txt +++ b/versions.txt @@ -937,3 +937,14 @@ grpc-google-cloud-gkerecommender-v1:0.2.0:0.3.0-SNAPSHOT google-cloud-cloudapiregistry:0.1.0:0.2.0-SNAPSHOT 
proto-google-cloud-cloudapiregistry-v1beta:0.1.0:0.2.0-SNAPSHOT grpc-google-cloud-cloudapiregistry-v1beta:0.1.0:0.2.0-SNAPSHOT +google-cloud-bigquerystorage:3.19.1:3.19.1 +grpc-google-cloud-bigquerystorage-v1beta1:0.191.1:0.191.1 +grpc-google-cloud-bigquerystorage-v1beta2:0.191.1:0.191.1 +grpc-google-cloud-bigquerystorage-v1:3.19.1:3.19.1 +proto-google-cloud-bigquerystorage-v1beta1:0.191.1:0.191.1 +proto-google-cloud-bigquerystorage-v1beta2:0.191.1:0.191.1 +proto-google-cloud-bigquerystorage-v1:3.19.1:3.19.1 +grpc-google-cloud-bigquerystorage-v1alpha:3.19.1:3.19.1 +proto-google-cloud-bigquerystorage-v1alpha:3.19.1:3.19.1 +proto-google-cloud-bigquerystorage-v1beta:3.19.1:3.19.1 +grpc-google-cloud-bigquerystorage-v1beta:3.19.1:3.19.1
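
For orientation, the write-path ITs above all assert on the same default-stream pattern. Below is a minimal sketch of that pattern, not part of the patch; the table layout (a single STRING column named "test_string") and the class name are illustrative assumptions:

    import com.google.api.core.ApiFuture;
    import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
    import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
    import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
    import com.google.cloud.bigquery.storage.v1.TableName;
    import org.json.JSONArray;
    import org.json.JSONObject;

    public class DefaultStreamSketch {
      public static void main(String[] args) throws Exception {
        // args: projectId datasetName tableName
        TableName table = TableName.of(args[0], args[1], args[2]);
        try (BigQueryWriteClient client = BigQueryWriteClient.create();
            JsonStreamWriter writer =
                JsonStreamWriter.newBuilder(table.toString(), client).build()) {
          JSONArray rows = new JSONArray();
          rows.put(new JSONObject().put("test_string", "hello"));
          // Appends to the default stream are committed as soon as the server
          // acknowledges them; no explicit stream creation or finalization is needed.
          ApiFuture<AppendRowsResponse> future = writer.append(rows);
          future.get();
          System.out.println("Appended records successfully.");
        }
      }
    }

This is why the default-stream ITs only check for the printed success message: a resolved append future already implies the rows are committed and queryable.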